sketchbook
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
Classes | Macros | Typedefs | Functions
arm_neon.h File Reference
#include <stdint.h>

Go to the source code of this file.

Classes

struct  int8x8x2_t
 
struct  int8x16x2_t
 
struct  int16x4x2_t
 
struct  int16x8x2_t
 
struct  int32x2x2_t
 
struct  int32x4x2_t
 
struct  int64x1x2_t
 
struct  int64x2x2_t
 
struct  uint8x8x2_t
 
struct  uint8x16x2_t
 
struct  uint16x4x2_t
 
struct  uint16x8x2_t
 
struct  uint32x2x2_t
 
struct  uint32x4x2_t
 
struct  uint64x1x2_t
 
struct  uint64x2x2_t
 
struct  float16x4x2_t
 
struct  float16x8x2_t
 
struct  float32x2x2_t
 
struct  float32x4x2_t
 
struct  poly8x8x2_t
 
struct  poly8x16x2_t
 
struct  poly16x4x2_t
 
struct  poly16x8x2_t
 
struct  int8x8x3_t
 
struct  int8x16x3_t
 
struct  int16x4x3_t
 
struct  int16x8x3_t
 
struct  int32x2x3_t
 
struct  int32x4x3_t
 
struct  int64x1x3_t
 
struct  int64x2x3_t
 
struct  uint8x8x3_t
 
struct  uint8x16x3_t
 
struct  uint16x4x3_t
 
struct  uint16x8x3_t
 
struct  uint32x2x3_t
 
struct  uint32x4x3_t
 
struct  uint64x1x3_t
 
struct  uint64x2x3_t
 
struct  float16x4x3_t
 
struct  float16x8x3_t
 
struct  float32x2x3_t
 
struct  float32x4x3_t
 
struct  poly8x8x3_t
 
struct  poly8x16x3_t
 
struct  poly16x4x3_t
 
struct  poly16x8x3_t
 
struct  int8x8x4_t
 
struct  int8x16x4_t
 
struct  int16x4x4_t
 
struct  int16x8x4_t
 
struct  int32x2x4_t
 
struct  int32x4x4_t
 
struct  int64x1x4_t
 
struct  int64x2x4_t
 
struct  uint8x8x4_t
 
struct  uint8x16x4_t
 
struct  uint16x4x4_t
 
struct  uint16x8x4_t
 
struct  uint32x2x4_t
 
struct  uint32x4x4_t
 
struct  uint64x1x4_t
 
struct  uint64x2x4_t
 
struct  float16x4x4_t
 
struct  float16x8x4_t
 
struct  float32x2x4_t
 
struct  float32x4x4_t
 
struct  poly8x8x4_t
 
struct  poly8x16x4_t
 
struct  poly16x4x4_t
 
struct  poly16x8x4_t
 

Macros

#define __ai   static inline __attribute__((__always_inline__, __nodebug__))
 
#define vcvtq_n_f32_u32(__p0, __p1)
 
#define vcvtq_n_f32_s32(__p0, __p1)
 
#define vcvt_n_f32_u32(__p0, __p1)
 
#define vcvt_n_f32_s32(__p0, __p1)
 
#define vcvtq_n_s32_f32(__p0, __p1)
 
#define vcvt_n_s32_f32(__p0, __p1)
 
#define vcvtq_n_u32_f32(__p0, __p1)
 
#define vcvt_n_u32_f32(__p0, __p1)
 
#define vdup_lane_p8(__p0, __p1)
 
#define vdup_lane_p16(__p0, __p1)
 
#define vdupq_lane_p8(__p0, __p1)
 
#define vdupq_lane_p16(__p0, __p1)
 
#define vdupq_lane_u8(__p0, __p1)
 
#define vdupq_lane_u32(__p0, __p1)
 
#define vdupq_lane_u64(__p0, __p1)
 
#define vdupq_lane_u16(__p0, __p1)
 
#define vdupq_lane_s8(__p0, __p1)
 
#define vdupq_lane_f32(__p0, __p1)
 
#define vdupq_lane_s32(__p0, __p1)
 
#define vdupq_lane_s64(__p0, __p1)
 
#define vdupq_lane_s16(__p0, __p1)
 
#define vdup_lane_u8(__p0, __p1)
 
#define vdup_lane_u32(__p0, __p1)
 
#define vdup_lane_u64(__p0, __p1)
 
#define vdup_lane_u16(__p0, __p1)
 
#define vdup_lane_s8(__p0, __p1)
 
#define vdup_lane_f32(__p0, __p1)
 
#define vdup_lane_s32(__p0, __p1)
 
#define vdup_lane_s64(__p0, __p1)
 
#define vdup_lane_s16(__p0, __p1)
 
#define vdupq_n_f16(__p0)
 
#define vdup_n_f16(__p0)
 
#define vext_p8(__p0, __p1, __p2)
 
#define vext_p16(__p0, __p1, __p2)
 
#define vextq_p8(__p0, __p1, __p2)
 
#define vextq_p16(__p0, __p1, __p2)
 
#define vextq_u8(__p0, __p1, __p2)
 
#define vextq_u32(__p0, __p1, __p2)
 
#define vextq_u64(__p0, __p1, __p2)
 
#define vextq_u16(__p0, __p1, __p2)
 
#define vextq_s8(__p0, __p1, __p2)
 
#define vextq_f32(__p0, __p1, __p2)
 
#define vextq_s32(__p0, __p1, __p2)
 
#define vextq_s64(__p0, __p1, __p2)
 
#define vextq_s16(__p0, __p1, __p2)
 
#define vext_u8(__p0, __p1, __p2)
 
#define vext_u32(__p0, __p1, __p2)
 
#define vext_u64(__p0, __p1, __p2)
 
#define vext_u16(__p0, __p1, __p2)
 
#define vext_s8(__p0, __p1, __p2)
 
#define vext_f32(__p0, __p1, __p2)
 
#define vext_s32(__p0, __p1, __p2)
 
#define vext_s64(__p0, __p1, __p2)
 
#define vext_s16(__p0, __p1, __p2)
 
#define vget_lane_p8(__p0, __p1)
 
#define __noswap_vget_lane_p8(__p0, __p1)
 
#define vget_lane_p16(__p0, __p1)
 
#define __noswap_vget_lane_p16(__p0, __p1)
 
#define vgetq_lane_p8(__p0, __p1)
 
#define __noswap_vgetq_lane_p8(__p0, __p1)
 
#define vgetq_lane_p16(__p0, __p1)
 
#define __noswap_vgetq_lane_p16(__p0, __p1)
 
#define vgetq_lane_u8(__p0, __p1)
 
#define __noswap_vgetq_lane_u8(__p0, __p1)
 
#define vgetq_lane_u32(__p0, __p1)
 
#define __noswap_vgetq_lane_u32(__p0, __p1)
 
#define vgetq_lane_u64(__p0, __p1)
 
#define __noswap_vgetq_lane_u64(__p0, __p1)
 
#define vgetq_lane_u16(__p0, __p1)
 
#define __noswap_vgetq_lane_u16(__p0, __p1)
 
#define vgetq_lane_s8(__p0, __p1)
 
#define __noswap_vgetq_lane_s8(__p0, __p1)
 
#define vgetq_lane_f32(__p0, __p1)
 
#define __noswap_vgetq_lane_f32(__p0, __p1)
 
#define vgetq_lane_s32(__p0, __p1)
 
#define __noswap_vgetq_lane_s32(__p0, __p1)
 
#define vgetq_lane_s64(__p0, __p1)
 
#define __noswap_vgetq_lane_s64(__p0, __p1)
 
#define vgetq_lane_s16(__p0, __p1)
 
#define __noswap_vgetq_lane_s16(__p0, __p1)
 
#define vget_lane_u8(__p0, __p1)
 
#define __noswap_vget_lane_u8(__p0, __p1)
 
#define vget_lane_u32(__p0, __p1)
 
#define __noswap_vget_lane_u32(__p0, __p1)
 
#define vget_lane_u64(__p0, __p1)
 
#define __noswap_vget_lane_u64(__p0, __p1)
 
#define vget_lane_u16(__p0, __p1)
 
#define __noswap_vget_lane_u16(__p0, __p1)
 
#define vget_lane_s8(__p0, __p1)
 
#define __noswap_vget_lane_s8(__p0, __p1)
 
#define vget_lane_f32(__p0, __p1)
 
#define __noswap_vget_lane_f32(__p0, __p1)
 
#define vget_lane_s32(__p0, __p1)
 
#define __noswap_vget_lane_s32(__p0, __p1)
 
#define vget_lane_s64(__p0, __p1)
 
#define __noswap_vget_lane_s64(__p0, __p1)
 
#define vget_lane_s16(__p0, __p1)
 
#define __noswap_vget_lane_s16(__p0, __p1)
 
#define vld1_p8(__p0)
 
#define vld1_p16(__p0)
 
#define vld1q_p8(__p0)
 
#define vld1q_p16(__p0)
 
#define vld1q_u8(__p0)
 
#define vld1q_u32(__p0)
 
#define vld1q_u64(__p0)
 
#define vld1q_u16(__p0)
 
#define vld1q_s8(__p0)
 
#define vld1q_f32(__p0)
 
#define vld1q_f16(__p0)
 
#define vld1q_s32(__p0)
 
#define vld1q_s64(__p0)
 
#define vld1q_s16(__p0)
 
#define vld1_u8(__p0)
 
#define vld1_u32(__p0)
 
#define vld1_u64(__p0)
 
#define vld1_u16(__p0)
 
#define vld1_s8(__p0)
 
#define vld1_f32(__p0)
 
#define vld1_f16(__p0)
 
#define vld1_s32(__p0)
 
#define vld1_s64(__p0)
 
#define vld1_s16(__p0)
 
#define vld1_dup_p8(__p0)
 
#define vld1_dup_p16(__p0)
 
#define vld1q_dup_p8(__p0)
 
#define vld1q_dup_p16(__p0)
 
#define vld1q_dup_u8(__p0)
 
#define vld1q_dup_u32(__p0)
 
#define vld1q_dup_u64(__p0)
 
#define vld1q_dup_u16(__p0)
 
#define vld1q_dup_s8(__p0)
 
#define vld1q_dup_f32(__p0)
 
#define vld1q_dup_f16(__p0)
 
#define vld1q_dup_s32(__p0)
 
#define vld1q_dup_s64(__p0)
 
#define vld1q_dup_s16(__p0)
 
#define vld1_dup_u8(__p0)
 
#define vld1_dup_u32(__p0)
 
#define vld1_dup_u64(__p0)
 
#define vld1_dup_u16(__p0)
 
#define vld1_dup_s8(__p0)
 
#define vld1_dup_f32(__p0)
 
#define vld1_dup_f16(__p0)
 
#define vld1_dup_s32(__p0)
 
#define vld1_dup_s64(__p0)
 
#define vld1_dup_s16(__p0)
 
#define vld1_lane_p8(__p0, __p1, __p2)
 
#define vld1_lane_p16(__p0, __p1, __p2)
 
#define vld1q_lane_p8(__p0, __p1, __p2)
 
#define vld1q_lane_p16(__p0, __p1, __p2)
 
#define vld1q_lane_u8(__p0, __p1, __p2)
 
#define vld1q_lane_u32(__p0, __p1, __p2)
 
#define vld1q_lane_u64(__p0, __p1, __p2)
 
#define vld1q_lane_u16(__p0, __p1, __p2)
 
#define vld1q_lane_s8(__p0, __p1, __p2)
 
#define vld1q_lane_f32(__p0, __p1, __p2)
 
#define vld1q_lane_f16(__p0, __p1, __p2)
 
#define vld1q_lane_s32(__p0, __p1, __p2)
 
#define vld1q_lane_s64(__p0, __p1, __p2)
 
#define vld1q_lane_s16(__p0, __p1, __p2)
 
#define vld1_lane_u8(__p0, __p1, __p2)
 
#define vld1_lane_u32(__p0, __p1, __p2)
 
#define vld1_lane_u64(__p0, __p1, __p2)
 
#define vld1_lane_u16(__p0, __p1, __p2)
 
#define vld1_lane_s8(__p0, __p1, __p2)
 
#define vld1_lane_f32(__p0, __p1, __p2)
 
#define vld1_lane_f16(__p0, __p1, __p2)
 
#define vld1_lane_s32(__p0, __p1, __p2)
 
#define vld1_lane_s64(__p0, __p1, __p2)
 
#define vld1_lane_s16(__p0, __p1, __p2)
 
#define vld2_p8(__p0)
 
#define vld2_p16(__p0)
 
#define vld2q_p8(__p0)
 
#define vld2q_p16(__p0)
 
#define vld2q_u8(__p0)
 
#define vld2q_u32(__p0)
 
#define vld2q_u16(__p0)
 
#define vld2q_s8(__p0)
 
#define vld2q_f32(__p0)
 
#define vld2q_f16(__p0)
 
#define vld2q_s32(__p0)
 
#define vld2q_s16(__p0)
 
#define vld2_u8(__p0)
 
#define vld2_u32(__p0)
 
#define vld2_u64(__p0)
 
#define vld2_u16(__p0)
 
#define vld2_s8(__p0)
 
#define vld2_f32(__p0)
 
#define vld2_f16(__p0)
 
#define vld2_s32(__p0)
 
#define vld2_s64(__p0)
 
#define vld2_s16(__p0)
 
#define vld2_dup_p8(__p0)
 
#define vld2_dup_p16(__p0)
 
#define vld2_dup_u8(__p0)
 
#define vld2_dup_u32(__p0)
 
#define vld2_dup_u64(__p0)
 
#define vld2_dup_u16(__p0)
 
#define vld2_dup_s8(__p0)
 
#define vld2_dup_f32(__p0)
 
#define vld2_dup_f16(__p0)
 
#define vld2_dup_s32(__p0)
 
#define vld2_dup_s64(__p0)
 
#define vld2_dup_s16(__p0)
 
#define vld2_lane_p8(__p0, __p1, __p2)
 
#define vld2_lane_p16(__p0, __p1, __p2)
 
#define vld2q_lane_p16(__p0, __p1, __p2)
 
#define vld2q_lane_u32(__p0, __p1, __p2)
 
#define vld2q_lane_u16(__p0, __p1, __p2)
 
#define vld2q_lane_f32(__p0, __p1, __p2)
 
#define vld2q_lane_f16(__p0, __p1, __p2)
 
#define vld2q_lane_s32(__p0, __p1, __p2)
 
#define vld2q_lane_s16(__p0, __p1, __p2)
 
#define vld2_lane_u8(__p0, __p1, __p2)
 
#define vld2_lane_u32(__p0, __p1, __p2)
 
#define vld2_lane_u16(__p0, __p1, __p2)
 
#define vld2_lane_s8(__p0, __p1, __p2)
 
#define vld2_lane_f32(__p0, __p1, __p2)
 
#define vld2_lane_f16(__p0, __p1, __p2)
 
#define vld2_lane_s32(__p0, __p1, __p2)
 
#define vld2_lane_s16(__p0, __p1, __p2)
 
#define vld3_p8(__p0)
 
#define vld3_p16(__p0)
 
#define vld3q_p8(__p0)
 
#define vld3q_p16(__p0)
 
#define vld3q_u8(__p0)
 
#define vld3q_u32(__p0)
 
#define vld3q_u16(__p0)
 
#define vld3q_s8(__p0)
 
#define vld3q_f32(__p0)
 
#define vld3q_f16(__p0)
 
#define vld3q_s32(__p0)
 
#define vld3q_s16(__p0)
 
#define vld3_u8(__p0)
 
#define vld3_u32(__p0)
 
#define vld3_u64(__p0)
 
#define vld3_u16(__p0)
 
#define vld3_s8(__p0)
 
#define vld3_f32(__p0)
 
#define vld3_f16(__p0)
 
#define vld3_s32(__p0)
 
#define vld3_s64(__p0)
 
#define vld3_s16(__p0)
 
#define vld3_dup_p8(__p0)
 
#define vld3_dup_p16(__p0)
 
#define vld3_dup_u8(__p0)
 
#define vld3_dup_u32(__p0)
 
#define vld3_dup_u64(__p0)
 
#define vld3_dup_u16(__p0)
 
#define vld3_dup_s8(__p0)
 
#define vld3_dup_f32(__p0)
 
#define vld3_dup_f16(__p0)
 
#define vld3_dup_s32(__p0)
 
#define vld3_dup_s64(__p0)
 
#define vld3_dup_s16(__p0)
 
#define vld3_lane_p8(__p0, __p1, __p2)
 
#define vld3_lane_p16(__p0, __p1, __p2)
 
#define vld3q_lane_p16(__p0, __p1, __p2)
 
#define vld3q_lane_u32(__p0, __p1, __p2)
 
#define vld3q_lane_u16(__p0, __p1, __p2)
 
#define vld3q_lane_f32(__p0, __p1, __p2)
 
#define vld3q_lane_f16(__p0, __p1, __p2)
 
#define vld3q_lane_s32(__p0, __p1, __p2)
 
#define vld3q_lane_s16(__p0, __p1, __p2)
 
#define vld3_lane_u8(__p0, __p1, __p2)
 
#define vld3_lane_u32(__p0, __p1, __p2)
 
#define vld3_lane_u16(__p0, __p1, __p2)
 
#define vld3_lane_s8(__p0, __p1, __p2)
 
#define vld3_lane_f32(__p0, __p1, __p2)
 
#define vld3_lane_f16(__p0, __p1, __p2)
 
#define vld3_lane_s32(__p0, __p1, __p2)
 
#define vld3_lane_s16(__p0, __p1, __p2)
 
#define vld4_p8(__p0)
 
#define vld4_p16(__p0)
 
#define vld4q_p8(__p0)
 
#define vld4q_p16(__p0)
 
#define vld4q_u8(__p0)
 
#define vld4q_u32(__p0)
 
#define vld4q_u16(__p0)
 
#define vld4q_s8(__p0)
 
#define vld4q_f32(__p0)
 
#define vld4q_f16(__p0)
 
#define vld4q_s32(__p0)
 
#define vld4q_s16(__p0)
 
#define vld4_u8(__p0)
 
#define vld4_u32(__p0)
 
#define vld4_u64(__p0)
 
#define vld4_u16(__p0)
 
#define vld4_s8(__p0)
 
#define vld4_f32(__p0)
 
#define vld4_f16(__p0)
 
#define vld4_s32(__p0)
 
#define vld4_s64(__p0)
 
#define vld4_s16(__p0)
 
#define vld4_dup_p8(__p0)
 
#define vld4_dup_p16(__p0)
 
#define vld4_dup_u8(__p0)
 
#define vld4_dup_u32(__p0)
 
#define vld4_dup_u64(__p0)
 
#define vld4_dup_u16(__p0)
 
#define vld4_dup_s8(__p0)
 
#define vld4_dup_f32(__p0)
 
#define vld4_dup_f16(__p0)
 
#define vld4_dup_s32(__p0)
 
#define vld4_dup_s64(__p0)
 
#define vld4_dup_s16(__p0)
 
#define vld4_lane_p8(__p0, __p1, __p2)
 
#define vld4_lane_p16(__p0, __p1, __p2)
 
#define vld4q_lane_p16(__p0, __p1, __p2)
 
#define vld4q_lane_u32(__p0, __p1, __p2)
 
#define vld4q_lane_u16(__p0, __p1, __p2)
 
#define vld4q_lane_f32(__p0, __p1, __p2)
 
#define vld4q_lane_f16(__p0, __p1, __p2)
 
#define vld4q_lane_s32(__p0, __p1, __p2)
 
#define vld4q_lane_s16(__p0, __p1, __p2)
 
#define vld4_lane_u8(__p0, __p1, __p2)
 
#define vld4_lane_u32(__p0, __p1, __p2)
 
#define vld4_lane_u16(__p0, __p1, __p2)
 
#define vld4_lane_s8(__p0, __p1, __p2)
 
#define vld4_lane_f32(__p0, __p1, __p2)
 
#define vld4_lane_f16(__p0, __p1, __p2)
 
#define vld4_lane_s32(__p0, __p1, __p2)
 
#define vld4_lane_s16(__p0, __p1, __p2)
 
#define vmlaq_lane_u32(__p0, __p1, __p2, __p3)
 
#define vmlaq_lane_u16(__p0, __p1, __p2, __p3)
 
#define vmlaq_lane_f32(__p0, __p1, __p2, __p3)
 
#define vmlaq_lane_s32(__p0, __p1, __p2, __p3)
 
#define vmlaq_lane_s16(__p0, __p1, __p2, __p3)
 
#define vmla_lane_u32(__p0, __p1, __p2, __p3)
 
#define vmla_lane_u16(__p0, __p1, __p2, __p3)
 
#define vmla_lane_f32(__p0, __p1, __p2, __p3)
 
#define vmla_lane_s32(__p0, __p1, __p2, __p3)
 
#define vmla_lane_s16(__p0, __p1, __p2, __p3)
 
#define vmlsq_lane_u32(__p0, __p1, __p2, __p3)
 
#define vmlsq_lane_u16(__p0, __p1, __p2, __p3)
 
#define vmlsq_lane_f32(__p0, __p1, __p2, __p3)
 
#define vmlsq_lane_s32(__p0, __p1, __p2, __p3)
 
#define vmlsq_lane_s16(__p0, __p1, __p2, __p3)
 
#define vmls_lane_u32(__p0, __p1, __p2, __p3)
 
#define vmls_lane_u16(__p0, __p1, __p2, __p3)
 
#define vmls_lane_f32(__p0, __p1, __p2, __p3)
 
#define vmls_lane_s32(__p0, __p1, __p2, __p3)
 
#define vmls_lane_s16(__p0, __p1, __p2, __p3)
 
#define vmovq_n_f16(__p0)
 
#define vmov_n_f16(__p0)
 
#define vmulq_lane_u32(__p0, __p1, __p2)
 
#define vmulq_lane_u16(__p0, __p1, __p2)
 
#define vmulq_lane_f32(__p0, __p1, __p2)
 
#define vmulq_lane_s32(__p0, __p1, __p2)
 
#define vmulq_lane_s16(__p0, __p1, __p2)
 
#define vmul_lane_u32(__p0, __p1, __p2)
 
#define vmul_lane_u16(__p0, __p1, __p2)
 
#define vmul_lane_f32(__p0, __p1, __p2)
 
#define vmul_lane_s32(__p0, __p1, __p2)
 
#define vmul_lane_s16(__p0, __p1, __p2)
 
#define vmull_lane_u32(__p0, __p1, __p2)
 
#define vmull_lane_u16(__p0, __p1, __p2)
 
#define vmull_lane_s32(__p0, __p1, __p2)
 
#define vmull_lane_s16(__p0, __p1, __p2)
 
#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3)
 
#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3)
 
#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3)
 
#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3)
 
#define vqdmulhq_lane_s32(__p0, __p1, __p2)
 
#define vqdmulhq_lane_s16(__p0, __p1, __p2)
 
#define vqdmulh_lane_s32(__p0, __p1, __p2)
 
#define vqdmulh_lane_s16(__p0, __p1, __p2)
 
#define vqdmull_lane_s32(__p0, __p1, __p2)
 
#define vqdmull_lane_s16(__p0, __p1, __p2)
 
#define vqrdmulhq_lane_s32(__p0, __p1, __p2)
 
#define vqrdmulhq_lane_s16(__p0, __p1, __p2)
 
#define vqrdmulh_lane_s32(__p0, __p1, __p2)
 
#define vqrdmulh_lane_s16(__p0, __p1, __p2)
 
#define vqrshrn_n_u32(__p0, __p1)
 
#define __noswap_vqrshrn_n_u32(__p0, __p1)
 
#define vqrshrn_n_u64(__p0, __p1)
 
#define __noswap_vqrshrn_n_u64(__p0, __p1)
 
#define vqrshrn_n_u16(__p0, __p1)
 
#define __noswap_vqrshrn_n_u16(__p0, __p1)
 
#define vqrshrn_n_s32(__p0, __p1)
 
#define __noswap_vqrshrn_n_s32(__p0, __p1)
 
#define vqrshrn_n_s64(__p0, __p1)
 
#define __noswap_vqrshrn_n_s64(__p0, __p1)
 
#define vqrshrn_n_s16(__p0, __p1)
 
#define __noswap_vqrshrn_n_s16(__p0, __p1)
 
#define vqrshrun_n_s32(__p0, __p1)
 
#define __noswap_vqrshrun_n_s32(__p0, __p1)
 
#define vqrshrun_n_s64(__p0, __p1)
 
#define __noswap_vqrshrun_n_s64(__p0, __p1)
 
#define vqrshrun_n_s16(__p0, __p1)
 
#define __noswap_vqrshrun_n_s16(__p0, __p1)
 
#define vqshlq_n_u8(__p0, __p1)
 
#define vqshlq_n_u32(__p0, __p1)
 
#define vqshlq_n_u64(__p0, __p1)
 
#define vqshlq_n_u16(__p0, __p1)
 
#define vqshlq_n_s8(__p0, __p1)
 
#define vqshlq_n_s32(__p0, __p1)
 
#define vqshlq_n_s64(__p0, __p1)
 
#define vqshlq_n_s16(__p0, __p1)
 
#define vqshl_n_u8(__p0, __p1)
 
#define vqshl_n_u32(__p0, __p1)
 
#define vqshl_n_u64(__p0, __p1)
 
#define vqshl_n_u16(__p0, __p1)
 
#define vqshl_n_s8(__p0, __p1)
 
#define vqshl_n_s32(__p0, __p1)
 
#define vqshl_n_s64(__p0, __p1)
 
#define vqshl_n_s16(__p0, __p1)
 
#define vqshluq_n_s8(__p0, __p1)
 
#define vqshluq_n_s32(__p0, __p1)
 
#define vqshluq_n_s64(__p0, __p1)
 
#define vqshluq_n_s16(__p0, __p1)
 
#define vqshlu_n_s8(__p0, __p1)
 
#define vqshlu_n_s32(__p0, __p1)
 
#define vqshlu_n_s64(__p0, __p1)
 
#define vqshlu_n_s16(__p0, __p1)
 
#define vqshrn_n_u32(__p0, __p1)
 
#define __noswap_vqshrn_n_u32(__p0, __p1)
 
#define vqshrn_n_u64(__p0, __p1)
 
#define __noswap_vqshrn_n_u64(__p0, __p1)
 
#define vqshrn_n_u16(__p0, __p1)
 
#define __noswap_vqshrn_n_u16(__p0, __p1)
 
#define vqshrn_n_s32(__p0, __p1)
 
#define __noswap_vqshrn_n_s32(__p0, __p1)
 
#define vqshrn_n_s64(__p0, __p1)
 
#define __noswap_vqshrn_n_s64(__p0, __p1)
 
#define vqshrn_n_s16(__p0, __p1)
 
#define __noswap_vqshrn_n_s16(__p0, __p1)
 
#define vqshrun_n_s32(__p0, __p1)
 
#define __noswap_vqshrun_n_s32(__p0, __p1)
 
#define vqshrun_n_s64(__p0, __p1)
 
#define __noswap_vqshrun_n_s64(__p0, __p1)
 
#define vqshrun_n_s16(__p0, __p1)
 
#define __noswap_vqshrun_n_s16(__p0, __p1)
 
#define vrshrq_n_u8(__p0, __p1)
 
#define vrshrq_n_u32(__p0, __p1)
 
#define vrshrq_n_u64(__p0, __p1)
 
#define vrshrq_n_u16(__p0, __p1)
 
#define vrshrq_n_s8(__p0, __p1)
 
#define vrshrq_n_s32(__p0, __p1)
 
#define vrshrq_n_s64(__p0, __p1)
 
#define vrshrq_n_s16(__p0, __p1)
 
#define vrshr_n_u8(__p0, __p1)
 
#define vrshr_n_u32(__p0, __p1)
 
#define vrshr_n_u64(__p0, __p1)
 
#define vrshr_n_u16(__p0, __p1)
 
#define vrshr_n_s8(__p0, __p1)
 
#define vrshr_n_s32(__p0, __p1)
 
#define vrshr_n_s64(__p0, __p1)
 
#define vrshr_n_s16(__p0, __p1)
 
#define vrshrn_n_u32(__p0, __p1)
 
#define __noswap_vrshrn_n_u32(__p0, __p1)
 
#define vrshrn_n_u64(__p0, __p1)
 
#define __noswap_vrshrn_n_u64(__p0, __p1)
 
#define vrshrn_n_u16(__p0, __p1)
 
#define __noswap_vrshrn_n_u16(__p0, __p1)
 
#define vrshrn_n_s32(__p0, __p1)
 
#define __noswap_vrshrn_n_s32(__p0, __p1)
 
#define vrshrn_n_s64(__p0, __p1)
 
#define __noswap_vrshrn_n_s64(__p0, __p1)
 
#define vrshrn_n_s16(__p0, __p1)
 
#define __noswap_vrshrn_n_s16(__p0, __p1)
 
#define vrsraq_n_u8(__p0, __p1, __p2)
 
#define vrsraq_n_u32(__p0, __p1, __p2)
 
#define vrsraq_n_u64(__p0, __p1, __p2)
 
#define vrsraq_n_u16(__p0, __p1, __p2)
 
#define vrsraq_n_s8(__p0, __p1, __p2)
 
#define vrsraq_n_s32(__p0, __p1, __p2)
 
#define vrsraq_n_s64(__p0, __p1, __p2)
 
#define vrsraq_n_s16(__p0, __p1, __p2)
 
#define vrsra_n_u8(__p0, __p1, __p2)
 
#define vrsra_n_u32(__p0, __p1, __p2)
 
#define vrsra_n_u64(__p0, __p1, __p2)
 
#define vrsra_n_u16(__p0, __p1, __p2)
 
#define vrsra_n_s8(__p0, __p1, __p2)
 
#define vrsra_n_s32(__p0, __p1, __p2)
 
#define vrsra_n_s64(__p0, __p1, __p2)
 
#define vrsra_n_s16(__p0, __p1, __p2)
 
#define vset_lane_p8(__p0, __p1, __p2)
 
#define __noswap_vset_lane_p8(__p0, __p1, __p2)
 
#define vset_lane_p16(__p0, __p1, __p2)
 
#define __noswap_vset_lane_p16(__p0, __p1, __p2)
 
#define vsetq_lane_p8(__p0, __p1, __p2)
 
#define __noswap_vsetq_lane_p8(__p0, __p1, __p2)
 
#define vsetq_lane_p16(__p0, __p1, __p2)
 
#define __noswap_vsetq_lane_p16(__p0, __p1, __p2)
 
#define vsetq_lane_u8(__p0, __p1, __p2)
 
#define __noswap_vsetq_lane_u8(__p0, __p1, __p2)
 
#define vsetq_lane_u32(__p0, __p1, __p2)
 
#define __noswap_vsetq_lane_u32(__p0, __p1, __p2)
 
#define vsetq_lane_u64(__p0, __p1, __p2)
 
#define __noswap_vsetq_lane_u64(__p0, __p1, __p2)
 
#define vsetq_lane_u16(__p0, __p1, __p2)
 
#define __noswap_vsetq_lane_u16(__p0, __p1, __p2)
 
#define vsetq_lane_s8(__p0, __p1, __p2)
 
#define __noswap_vsetq_lane_s8(__p0, __p1, __p2)
 
#define vsetq_lane_f32(__p0, __p1, __p2)
 
#define __noswap_vsetq_lane_f32(__p0, __p1, __p2)
 
#define vsetq_lane_s32(__p0, __p1, __p2)
 
#define __noswap_vsetq_lane_s32(__p0, __p1, __p2)
 
#define vsetq_lane_s64(__p0, __p1, __p2)
 
#define __noswap_vsetq_lane_s64(__p0, __p1, __p2)
 
#define vsetq_lane_s16(__p0, __p1, __p2)
 
#define __noswap_vsetq_lane_s16(__p0, __p1, __p2)
 
#define vset_lane_u8(__p0, __p1, __p2)
 
#define __noswap_vset_lane_u8(__p0, __p1, __p2)
 
#define vset_lane_u32(__p0, __p1, __p2)
 
#define __noswap_vset_lane_u32(__p0, __p1, __p2)
 
#define vset_lane_u64(__p0, __p1, __p2)
 
#define __noswap_vset_lane_u64(__p0, __p1, __p2)
 
#define vset_lane_u16(__p0, __p1, __p2)
 
#define __noswap_vset_lane_u16(__p0, __p1, __p2)
 
#define vset_lane_s8(__p0, __p1, __p2)
 
#define __noswap_vset_lane_s8(__p0, __p1, __p2)
 
#define vset_lane_f32(__p0, __p1, __p2)
 
#define __noswap_vset_lane_f32(__p0, __p1, __p2)
 
#define vset_lane_s32(__p0, __p1, __p2)
 
#define __noswap_vset_lane_s32(__p0, __p1, __p2)
 
#define vset_lane_s64(__p0, __p1, __p2)
 
#define __noswap_vset_lane_s64(__p0, __p1, __p2)
 
#define vset_lane_s16(__p0, __p1, __p2)
 
#define __noswap_vset_lane_s16(__p0, __p1, __p2)
 
#define vshlq_n_u8(__p0, __p1)
 
#define vshlq_n_u32(__p0, __p1)
 
#define vshlq_n_u64(__p0, __p1)
 
#define vshlq_n_u16(__p0, __p1)
 
#define vshlq_n_s8(__p0, __p1)
 
#define vshlq_n_s32(__p0, __p1)
 
#define vshlq_n_s64(__p0, __p1)
 
#define vshlq_n_s16(__p0, __p1)
 
#define vshl_n_u8(__p0, __p1)
 
#define vshl_n_u32(__p0, __p1)
 
#define vshl_n_u64(__p0, __p1)
 
#define vshl_n_u16(__p0, __p1)
 
#define vshl_n_s8(__p0, __p1)
 
#define vshl_n_s32(__p0, __p1)
 
#define vshl_n_s64(__p0, __p1)
 
#define vshl_n_s16(__p0, __p1)
 
#define vshll_n_u8(__p0, __p1)
 
#define __noswap_vshll_n_u8(__p0, __p1)
 
#define vshll_n_u32(__p0, __p1)
 
#define __noswap_vshll_n_u32(__p0, __p1)
 
#define vshll_n_u16(__p0, __p1)
 
#define __noswap_vshll_n_u16(__p0, __p1)
 
#define vshll_n_s8(__p0, __p1)
 
#define __noswap_vshll_n_s8(__p0, __p1)
 
#define vshll_n_s32(__p0, __p1)
 
#define __noswap_vshll_n_s32(__p0, __p1)
 
#define vshll_n_s16(__p0, __p1)
 
#define __noswap_vshll_n_s16(__p0, __p1)
 
#define vshrq_n_u8(__p0, __p1)
 
#define vshrq_n_u32(__p0, __p1)
 
#define vshrq_n_u64(__p0, __p1)
 
#define vshrq_n_u16(__p0, __p1)
 
#define vshrq_n_s8(__p0, __p1)
 
#define vshrq_n_s32(__p0, __p1)
 
#define vshrq_n_s64(__p0, __p1)
 
#define vshrq_n_s16(__p0, __p1)
 
#define vshr_n_u8(__p0, __p1)
 
#define vshr_n_u32(__p0, __p1)
 
#define vshr_n_u64(__p0, __p1)
 
#define vshr_n_u16(__p0, __p1)
 
#define vshr_n_s8(__p0, __p1)
 
#define vshr_n_s32(__p0, __p1)
 
#define vshr_n_s64(__p0, __p1)
 
#define vshr_n_s16(__p0, __p1)
 
#define vshrn_n_u32(__p0, __p1)
 
#define __noswap_vshrn_n_u32(__p0, __p1)
 
#define vshrn_n_u64(__p0, __p1)
 
#define __noswap_vshrn_n_u64(__p0, __p1)
 
#define vshrn_n_u16(__p0, __p1)
 
#define __noswap_vshrn_n_u16(__p0, __p1)
 
#define vshrn_n_s32(__p0, __p1)
 
#define __noswap_vshrn_n_s32(__p0, __p1)
 
#define vshrn_n_s64(__p0, __p1)
 
#define __noswap_vshrn_n_s64(__p0, __p1)
 
#define vshrn_n_s16(__p0, __p1)
 
#define __noswap_vshrn_n_s16(__p0, __p1)
 
#define vsli_n_p8(__p0, __p1, __p2)
 
#define vsli_n_p16(__p0, __p1, __p2)
 
#define vsliq_n_p8(__p0, __p1, __p2)
 
#define vsliq_n_p16(__p0, __p1, __p2)
 
#define vsliq_n_u8(__p0, __p1, __p2)
 
#define vsliq_n_u32(__p0, __p1, __p2)
 
#define vsliq_n_u64(__p0, __p1, __p2)
 
#define vsliq_n_u16(__p0, __p1, __p2)
 
#define vsliq_n_s8(__p0, __p1, __p2)
 
#define vsliq_n_s32(__p0, __p1, __p2)
 
#define vsliq_n_s64(__p0, __p1, __p2)
 
#define vsliq_n_s16(__p0, __p1, __p2)
 
#define vsli_n_u8(__p0, __p1, __p2)
 
#define vsli_n_u32(__p0, __p1, __p2)
 
#define vsli_n_u64(__p0, __p1, __p2)
 
#define vsli_n_u16(__p0, __p1, __p2)
 
#define vsli_n_s8(__p0, __p1, __p2)
 
#define vsli_n_s32(__p0, __p1, __p2)
 
#define vsli_n_s64(__p0, __p1, __p2)
 
#define vsli_n_s16(__p0, __p1, __p2)
 
#define vsraq_n_u8(__p0, __p1, __p2)
 
#define vsraq_n_u32(__p0, __p1, __p2)
 
#define vsraq_n_u64(__p0, __p1, __p2)
 
#define vsraq_n_u16(__p0, __p1, __p2)
 
#define vsraq_n_s8(__p0, __p1, __p2)
 
#define vsraq_n_s32(__p0, __p1, __p2)
 
#define vsraq_n_s64(__p0, __p1, __p2)
 
#define vsraq_n_s16(__p0, __p1, __p2)
 
#define vsra_n_u8(__p0, __p1, __p2)
 
#define vsra_n_u32(__p0, __p1, __p2)
 
#define vsra_n_u64(__p0, __p1, __p2)
 
#define vsra_n_u16(__p0, __p1, __p2)
 
#define vsra_n_s8(__p0, __p1, __p2)
 
#define vsra_n_s32(__p0, __p1, __p2)
 
#define vsra_n_s64(__p0, __p1, __p2)
 
#define vsra_n_s16(__p0, __p1, __p2)
 
#define vsri_n_p8(__p0, __p1, __p2)
 
#define vsri_n_p16(__p0, __p1, __p2)
 
#define vsriq_n_p8(__p0, __p1, __p2)
 
#define vsriq_n_p16(__p0, __p1, __p2)
 
#define vsriq_n_u8(__p0, __p1, __p2)
 
#define vsriq_n_u32(__p0, __p1, __p2)
 
#define vsriq_n_u64(__p0, __p1, __p2)
 
#define vsriq_n_u16(__p0, __p1, __p2)
 
#define vsriq_n_s8(__p0, __p1, __p2)
 
#define vsriq_n_s32(__p0, __p1, __p2)
 
#define vsriq_n_s64(__p0, __p1, __p2)
 
#define vsriq_n_s16(__p0, __p1, __p2)
 
#define vsri_n_u8(__p0, __p1, __p2)
 
#define vsri_n_u32(__p0, __p1, __p2)
 
#define vsri_n_u64(__p0, __p1, __p2)
 
#define vsri_n_u16(__p0, __p1, __p2)
 
#define vsri_n_s8(__p0, __p1, __p2)
 
#define vsri_n_s32(__p0, __p1, __p2)
 
#define vsri_n_s64(__p0, __p1, __p2)
 
#define vsri_n_s16(__p0, __p1, __p2)
 
#define vst1_p8(__p0, __p1)
 
#define vst1_p16(__p0, __p1)
 
#define vst1q_p8(__p0, __p1)
 
#define vst1q_p16(__p0, __p1)
 
#define vst1q_u8(__p0, __p1)
 
#define vst1q_u32(__p0, __p1)
 
#define vst1q_u64(__p0, __p1)
 
#define vst1q_u16(__p0, __p1)
 
#define vst1q_s8(__p0, __p1)
 
#define vst1q_f32(__p0, __p1)
 
#define vst1q_f16(__p0, __p1)
 
#define vst1q_s32(__p0, __p1)
 
#define vst1q_s64(__p0, __p1)
 
#define vst1q_s16(__p0, __p1)
 
#define vst1_u8(__p0, __p1)
 
#define vst1_u32(__p0, __p1)
 
#define vst1_u64(__p0, __p1)
 
#define vst1_u16(__p0, __p1)
 
#define vst1_s8(__p0, __p1)
 
#define vst1_f32(__p0, __p1)
 
#define vst1_f16(__p0, __p1)
 
#define vst1_s32(__p0, __p1)
 
#define vst1_s64(__p0, __p1)
 
#define vst1_s16(__p0, __p1)
 
#define vst1_lane_p8(__p0, __p1, __p2)
 
#define vst1_lane_p16(__p0, __p1, __p2)
 
#define vst1q_lane_p8(__p0, __p1, __p2)
 
#define vst1q_lane_p16(__p0, __p1, __p2)
 
#define vst1q_lane_u8(__p0, __p1, __p2)
 
#define vst1q_lane_u32(__p0, __p1, __p2)
 
#define vst1q_lane_u64(__p0, __p1, __p2)
 
#define vst1q_lane_u16(__p0, __p1, __p2)
 
#define vst1q_lane_s8(__p0, __p1, __p2)
 
#define vst1q_lane_f32(__p0, __p1, __p2)
 
#define vst1q_lane_f16(__p0, __p1, __p2)
 
#define vst1q_lane_s32(__p0, __p1, __p2)
 
#define vst1q_lane_s64(__p0, __p1, __p2)
 
#define vst1q_lane_s16(__p0, __p1, __p2)
 
#define vst1_lane_u8(__p0, __p1, __p2)
 
#define vst1_lane_u32(__p0, __p1, __p2)
 
#define vst1_lane_u64(__p0, __p1, __p2)
 
#define vst1_lane_u16(__p0, __p1, __p2)
 
#define vst1_lane_s8(__p0, __p1, __p2)
 
#define vst1_lane_f32(__p0, __p1, __p2)
 
#define vst1_lane_f16(__p0, __p1, __p2)
 
#define vst1_lane_s32(__p0, __p1, __p2)
 
#define vst1_lane_s64(__p0, __p1, __p2)
 
#define vst1_lane_s16(__p0, __p1, __p2)
 
#define vst2_p8(__p0, __p1)
 
#define vst2_p16(__p0, __p1)
 
#define vst2q_p8(__p0, __p1)
 
#define vst2q_p16(__p0, __p1)
 
#define vst2q_u8(__p0, __p1)
 
#define vst2q_u32(__p0, __p1)
 
#define vst2q_u16(__p0, __p1)
 
#define vst2q_s8(__p0, __p1)
 
#define vst2q_f32(__p0, __p1)
 
#define vst2q_f16(__p0, __p1)
 
#define vst2q_s32(__p0, __p1)
 
#define vst2q_s16(__p0, __p1)
 
#define vst2_u8(__p0, __p1)
 
#define vst2_u32(__p0, __p1)
 
#define vst2_u64(__p0, __p1)
 
#define vst2_u16(__p0, __p1)
 
#define vst2_s8(__p0, __p1)
 
#define vst2_f32(__p0, __p1)
 
#define vst2_f16(__p0, __p1)
 
#define vst2_s32(__p0, __p1)
 
#define vst2_s64(__p0, __p1)
 
#define vst2_s16(__p0, __p1)
 
#define vst2_lane_p8(__p0, __p1, __p2)
 
#define vst2_lane_p16(__p0, __p1, __p2)
 
#define vst2q_lane_p16(__p0, __p1, __p2)
 
#define vst2q_lane_u32(__p0, __p1, __p2)
 
#define vst2q_lane_u16(__p0, __p1, __p2)
 
#define vst2q_lane_f32(__p0, __p1, __p2)
 
#define vst2q_lane_f16(__p0, __p1, __p2)
 
#define vst2q_lane_s32(__p0, __p1, __p2)
 
#define vst2q_lane_s16(__p0, __p1, __p2)
 
#define vst2_lane_u8(__p0, __p1, __p2)
 
#define vst2_lane_u32(__p0, __p1, __p2)
 
#define vst2_lane_u16(__p0, __p1, __p2)
 
#define vst2_lane_s8(__p0, __p1, __p2)
 
#define vst2_lane_f32(__p0, __p1, __p2)
 
#define vst2_lane_f16(__p0, __p1, __p2)
 
#define vst2_lane_s32(__p0, __p1, __p2)
 
#define vst2_lane_s16(__p0, __p1, __p2)
 
#define vst3_p8(__p0, __p1)
 
#define vst3_p16(__p0, __p1)
 
#define vst3q_p8(__p0, __p1)
 
#define vst3q_p16(__p0, __p1)
 
#define vst3q_u8(__p0, __p1)
 
#define vst3q_u32(__p0, __p1)
 
#define vst3q_u16(__p0, __p1)
 
#define vst3q_s8(__p0, __p1)
 
#define vst3q_f32(__p0, __p1)
 
#define vst3q_f16(__p0, __p1)
 
#define vst3q_s32(__p0, __p1)
 
#define vst3q_s16(__p0, __p1)
 
#define vst3_u8(__p0, __p1)
 
#define vst3_u32(__p0, __p1)
 
#define vst3_u64(__p0, __p1)
 
#define vst3_u16(__p0, __p1)
 
#define vst3_s8(__p0, __p1)
 
#define vst3_f32(__p0, __p1)
 
#define vst3_f16(__p0, __p1)
 
#define vst3_s32(__p0, __p1)
 
#define vst3_s64(__p0, __p1)
 
#define vst3_s16(__p0, __p1)
 
#define vst3_lane_p8(__p0, __p1, __p2)
 
#define vst3_lane_p16(__p0, __p1, __p2)
 
#define vst3q_lane_p16(__p0, __p1, __p2)
 
#define vst3q_lane_u32(__p0, __p1, __p2)
 
#define vst3q_lane_u16(__p0, __p1, __p2)
 
#define vst3q_lane_f32(__p0, __p1, __p2)
 
#define vst3q_lane_f16(__p0, __p1, __p2)
 
#define vst3q_lane_s32(__p0, __p1, __p2)
 
#define vst3q_lane_s16(__p0, __p1, __p2)
 
#define vst3_lane_u8(__p0, __p1, __p2)
 
#define vst3_lane_u32(__p0, __p1, __p2)
 
#define vst3_lane_u16(__p0, __p1, __p2)
 
#define vst3_lane_s8(__p0, __p1, __p2)
 
#define vst3_lane_f32(__p0, __p1, __p2)
 
#define vst3_lane_f16(__p0, __p1, __p2)
 
#define vst3_lane_s32(__p0, __p1, __p2)
 
#define vst3_lane_s16(__p0, __p1, __p2)
 
#define vst4_p8(__p0, __p1)
 
#define vst4_p16(__p0, __p1)
 
#define vst4q_p8(__p0, __p1)
 
#define vst4q_p16(__p0, __p1)
 
#define vst4q_u8(__p0, __p1)
 
#define vst4q_u32(__p0, __p1)
 
#define vst4q_u16(__p0, __p1)
 
#define vst4q_s8(__p0, __p1)
 
#define vst4q_f32(__p0, __p1)
 
#define vst4q_f16(__p0, __p1)
 
#define vst4q_s32(__p0, __p1)
 
#define vst4q_s16(__p0, __p1)
 
#define vst4_u8(__p0, __p1)
 
#define vst4_u32(__p0, __p1)
 
#define vst4_u64(__p0, __p1)
 
#define vst4_u16(__p0, __p1)
 
#define vst4_s8(__p0, __p1)
 
#define vst4_f32(__p0, __p1)
 
#define vst4_f16(__p0, __p1)
 
#define vst4_s32(__p0, __p1)
 
#define vst4_s64(__p0, __p1)
 
#define vst4_s16(__p0, __p1)
 
#define vst4_lane_p8(__p0, __p1, __p2)
 
#define vst4_lane_p16(__p0, __p1, __p2)
 
#define vst4q_lane_p16(__p0, __p1, __p2)
 
#define vst4q_lane_u32(__p0, __p1, __p2)
 
#define vst4q_lane_u16(__p0, __p1, __p2)
 
#define vst4q_lane_f32(__p0, __p1, __p2)
 
#define vst4q_lane_f16(__p0, __p1, __p2)
 
#define vst4q_lane_s32(__p0, __p1, __p2)
 
#define vst4q_lane_s16(__p0, __p1, __p2)
 
#define vst4_lane_u8(__p0, __p1, __p2)
 
#define vst4_lane_u32(__p0, __p1, __p2)
 
#define vst4_lane_u16(__p0, __p1, __p2)
 
#define vst4_lane_s8(__p0, __p1, __p2)
 
#define vst4_lane_f32(__p0, __p1, __p2)
 
#define vst4_lane_f16(__p0, __p1, __p2)
 
#define vst4_lane_s32(__p0, __p1, __p2)
 
#define vst4_lane_s16(__p0, __p1, __p2)
 
#define vget_lane_f16(__p0_243, __p1_243)
 
#define vgetq_lane_f16(__p0_245, __p1_245)
 
#define vmlal_lane_u32(__p0, __p1, __p2, __p3)
 
#define vmlal_lane_u16(__p0, __p1, __p2, __p3)
 
#define vmlal_lane_s32(__p0, __p1, __p2, __p3)
 
#define vmlal_lane_s16(__p0, __p1, __p2, __p3)
 
#define vmlsl_lane_u32(__p0, __p1, __p2, __p3)
 
#define vmlsl_lane_u16(__p0, __p1, __p2, __p3)
 
#define vmlsl_lane_s32(__p0, __p1, __p2, __p3)
 
#define vmlsl_lane_s16(__p0, __p1, __p2, __p3)
 
#define vset_lane_f16(__p0_247, __p1_247, __p2_247)
 
#define vsetq_lane_f16(__p0_249, __p1_249, __p2_249)
 

Typedefs

typedef float float32_t
 
typedef __fp16 float16_t
 
typedef int8_t poly8_t
 
typedef int16_t poly16_t
 
typedef struct int8x8x2_t int8x8x2_t
 
typedef struct int8x16x2_t int8x16x2_t
 
typedef struct int16x4x2_t int16x4x2_t
 
typedef struct int16x8x2_t int16x8x2_t
 
typedef struct int32x2x2_t int32x2x2_t
 
typedef struct int32x4x2_t int32x4x2_t
 
typedef struct int64x1x2_t int64x1x2_t
 
typedef struct int64x2x2_t int64x2x2_t
 
typedef struct uint8x8x2_t uint8x8x2_t
 
typedef struct uint8x16x2_t uint8x16x2_t
 
typedef struct uint16x4x2_t uint16x4x2_t
 
typedef struct uint16x8x2_t uint16x8x2_t
 
typedef struct uint32x2x2_t uint32x2x2_t
 
typedef struct uint32x4x2_t uint32x4x2_t
 
typedef struct uint64x1x2_t uint64x1x2_t
 
typedef struct uint64x2x2_t uint64x2x2_t
 
typedef struct float16x4x2_t float16x4x2_t
 
typedef struct float16x8x2_t float16x8x2_t
 
typedef struct float32x2x2_t float32x2x2_t
 
typedef struct float32x4x2_t float32x4x2_t
 
typedef struct poly8x8x2_t poly8x8x2_t
 
typedef struct poly8x16x2_t poly8x16x2_t
 
typedef struct poly16x4x2_t poly16x4x2_t
 
typedef struct poly16x8x2_t poly16x8x2_t
 
typedef struct int8x8x3_t int8x8x3_t
 
typedef struct int8x16x3_t int8x16x3_t
 
typedef struct int16x4x3_t int16x4x3_t
 
typedef struct int16x8x3_t int16x8x3_t
 
typedef struct int32x2x3_t int32x2x3_t
 
typedef struct int32x4x3_t int32x4x3_t
 
typedef struct int64x1x3_t int64x1x3_t
 
typedef struct int64x2x3_t int64x2x3_t
 
typedef struct uint8x8x3_t uint8x8x3_t
 
typedef struct uint8x16x3_t uint8x16x3_t
 
typedef struct uint16x4x3_t uint16x4x3_t
 
typedef struct uint16x8x3_t uint16x8x3_t
 
typedef struct uint32x2x3_t uint32x2x3_t
 
typedef struct uint32x4x3_t uint32x4x3_t
 
typedef struct uint64x1x3_t uint64x1x3_t
 
typedef struct uint64x2x3_t uint64x2x3_t
 
typedef struct float16x4x3_t float16x4x3_t
 
typedef struct float16x8x3_t float16x8x3_t
 
typedef struct float32x2x3_t float32x2x3_t
 
typedef struct float32x4x3_t float32x4x3_t
 
typedef struct poly8x8x3_t poly8x8x3_t
 
typedef struct poly8x16x3_t poly8x16x3_t
 
typedef struct poly16x4x3_t poly16x4x3_t
 
typedef struct poly16x8x3_t poly16x8x3_t
 
typedef struct int8x8x4_t int8x8x4_t
 
typedef struct int8x16x4_t int8x16x4_t
 
typedef struct int16x4x4_t int16x4x4_t
 
typedef struct int16x8x4_t int16x8x4_t
 
typedef struct int32x2x4_t int32x2x4_t
 
typedef struct int32x4x4_t int32x4x4_t
 
typedef struct int64x1x4_t int64x1x4_t
 
typedef struct int64x2x4_t int64x2x4_t
 
typedef struct uint8x8x4_t uint8x8x4_t
 
typedef struct uint8x16x4_t uint8x16x4_t
 
typedef struct uint16x4x4_t uint16x4x4_t
 
typedef struct uint16x8x4_t uint16x8x4_t
 
typedef struct uint32x2x4_t uint32x2x4_t
 
typedef struct uint32x4x4_t uint32x4x4_t
 
typedef struct uint64x1x4_t uint64x1x4_t
 
typedef struct uint64x2x4_t uint64x2x4_t
 
typedef struct float16x4x4_t float16x4x4_t
 
typedef struct float16x8x4_t float16x8x4_t
 
typedef struct float32x2x4_t float32x2x4_t
 
typedef struct float32x4x4_t float32x4x4_t
 
typedef struct poly8x8x4_t poly8x8x4_t
 
typedef struct poly8x16x4_t poly8x16x4_t
 
typedef struct poly16x4x4_t poly16x4x4_t
 
typedef struct poly16x8x4_t poly16x8x4_t
 

Functions

typedef __attribute__ ((neon_vector_type(8))) int8_t int8x8_t
 
typedef __attribute__ ((neon_vector_type(16))) int8_t int8x16_t
 
typedef __attribute__ ((neon_vector_type(4))) int16_t int16x4_t
 
typedef __attribute__ ((neon_vector_type(2))) int32_t int32x2_t
 
typedef __attribute__ ((neon_vector_type(1))) int64_t int64x1_t
 
typedef __attribute__ ((neon_polyvector_type(8))) poly8_t poly8x8_t
 
typedef __attribute__ ((neon_polyvector_type(16))) poly8_t poly8x16_t
 
typedef __attribute__ ((neon_polyvector_type(4))) poly16_t poly16x4_t
 
__ai uint8x16_t vabdq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint8x16_t __noswap_vabdq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vabdq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint32x4_t __noswap_vabdq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8_t vabdq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai uint16x8_t __noswap_vabdq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vabdq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int8x16_t __noswap_vabdq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai float32x4_t vabdq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai int32x4_t vabdq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int32x4_t __noswap_vabdq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x8_t vabdq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai int16x8_t __noswap_vabdq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vabd_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint8x8_t __noswap_vabd_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vabd_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint32x2_t __noswap_vabd_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vabd_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai uint16x4_t __noswap_vabd_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vabd_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int8x8_t __noswap_vabd_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai float32x2_t vabd_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai int32x2_t vabd_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int32x2_t __noswap_vabd_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4_t vabd_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int16x4_t __noswap_vabd_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int8x16_t vabsq_s8 (int8x16_t __p0)
 
__ai float32x4_t vabsq_f32 (float32x4_t __p0)
 
__ai int32x4_t vabsq_s32 (int32x4_t __p0)
 
__ai int16x8_t vabsq_s16 (int16x8_t __p0)
 
__ai int8x8_t vabs_s8 (int8x8_t __p0)
 
__ai float32x2_t vabs_f32 (float32x2_t __p0)
 
__ai int32x2_t vabs_s32 (int32x2_t __p0)
 
__ai int16x4_t vabs_s16 (int16x4_t __p0)
 
__ai uint8x16_t vaddq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vaddq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint64x2_t vaddq_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint16x8_t vaddq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vaddq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai float32x4_t vaddq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai int32x4_t vaddq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int64x2_t vaddq_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int16x8_t vaddq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vadd_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vadd_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint64x1_t vadd_u64 (uint64x1_t __p0, uint64x1_t __p1)
 
__ai uint16x4_t vadd_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vadd_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai float32x2_t vadd_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai int32x2_t vadd_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x1_t vadd_s64 (int64x1_t __p0, int64x1_t __p1)
 
__ai int16x4_t vadd_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint16x4_t vaddhn_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x4_t __noswap_vaddhn_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint32x2_t vaddhn_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint32x2_t __noswap_vaddhn_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint8x8_t vaddhn_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai uint8x8_t __noswap_vaddhn_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int16x4_t vaddhn_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x4_t __noswap_vaddhn_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int32x2_t vaddhn_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int32x2_t __noswap_vaddhn_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int8x8_t vaddhn_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai int8x8_t __noswap_vaddhn_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x16_t vandq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vandq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint64x2_t vandq_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint16x8_t vandq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vandq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int32x4_t vandq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int64x2_t vandq_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int16x8_t vandq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vand_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vand_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint64x1_t vand_u64 (uint64x1_t __p0, uint64x1_t __p1)
 
__ai uint16x4_t vand_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vand_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int32x2_t vand_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x1_t vand_s64 (int64x1_t __p0, int64x1_t __p1)
 
__ai int16x4_t vand_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x16_t vbicq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vbicq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint64x2_t vbicq_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint16x8_t vbicq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vbicq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int32x4_t vbicq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int64x2_t vbicq_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int16x8_t vbicq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vbic_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vbic_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint64x1_t vbic_u64 (uint64x1_t __p0, uint64x1_t __p1)
 
__ai uint16x4_t vbic_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vbic_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int32x2_t vbic_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x1_t vbic_s64 (int64x1_t __p0, int64x1_t __p1)
 
__ai int16x4_t vbic_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai poly8x8_t vbsl_p8 (uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2)
 
__ai poly16x4_t vbsl_p16 (uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2)
 
__ai poly8x16_t vbslq_p8 (uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2)
 
__ai poly16x8_t vbslq_p16 (uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2)
 
__ai uint8x16_t vbslq_u8 (uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2)
 
__ai uint32x4_t vbslq_u32 (uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2)
 
__ai uint64x2_t vbslq_u64 (uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2)
 
__ai uint16x8_t vbslq_u16 (uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2)
 
__ai int8x16_t vbslq_s8 (uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2)
 
__ai float32x4_t vbslq_f32 (uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2)
 
__ai int32x4_t vbslq_s32 (uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2)
 
__ai int64x2_t vbslq_s64 (uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2)
 
__ai int16x8_t vbslq_s16 (uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2)
 
__ai uint8x8_t vbsl_u8 (uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2)
 
__ai uint32x2_t vbsl_u32 (uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2)
 
__ai uint64x1_t vbsl_u64 (uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2)
 
__ai uint16x4_t vbsl_u16 (uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2)
 
__ai int8x8_t vbsl_s8 (uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2)
 
__ai float32x2_t vbsl_f32 (uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2)
 
__ai int32x2_t vbsl_s32 (uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2)
 
__ai int64x1_t vbsl_s64 (uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2)
 
__ai int16x4_t vbsl_s16 (uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2)
 
__ai uint32x4_t vcageq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai uint32x2_t vcage_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai uint32x4_t vcagtq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai uint32x2_t vcagt_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai uint32x4_t vcaleq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai uint32x2_t vcale_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai uint32x4_t vcaltq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai uint32x2_t vcalt_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai uint8x8_t vceq_p8 (poly8x8_t __p0, poly8x8_t __p1)
 
__ai uint8x16_t vceqq_p8 (poly8x16_t __p0, poly8x16_t __p1)
 
__ai uint8x16_t vceqq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vceqq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8_t vceqq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai uint8x16_t vceqq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai uint32x4_t vceqq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai uint32x4_t vceqq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai uint16x8_t vceqq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vceq_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vceq_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vceq_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai uint8x8_t vceq_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai uint32x2_t vceq_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai uint32x2_t vceq_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai uint16x4_t vceq_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x16_t vcgeq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vcgeq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8_t vcgeq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai uint8x16_t vcgeq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai uint32x4_t vcgeq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai uint32x4_t vcgeq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai uint16x8_t vcgeq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vcge_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vcge_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vcge_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai uint8x8_t vcge_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai uint32x2_t vcge_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai uint32x2_t vcge_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai uint16x4_t vcge_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x16_t vcgtq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vcgtq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8_t vcgtq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai uint8x16_t vcgtq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai uint32x4_t vcgtq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai uint32x4_t vcgtq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai uint16x8_t vcgtq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vcgt_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vcgt_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vcgt_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai uint8x8_t vcgt_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai uint32x2_t vcgt_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai uint32x2_t vcgt_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai uint16x4_t vcgt_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x16_t vcleq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vcleq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8_t vcleq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai uint8x16_t vcleq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai uint32x4_t vcleq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai uint32x4_t vcleq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai uint16x8_t vcleq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vcle_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vcle_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vcle_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai uint8x8_t vcle_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai uint32x2_t vcle_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai uint32x2_t vcle_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai uint16x4_t vcle_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int8x16_t vclsq_s8 (int8x16_t __p0)
 
__ai int32x4_t vclsq_s32 (int32x4_t __p0)
 
__ai int16x8_t vclsq_s16 (int16x8_t __p0)
 
__ai int8x8_t vcls_s8 (int8x8_t __p0)
 
__ai int32x2_t vcls_s32 (int32x2_t __p0)
 
__ai int16x4_t vcls_s16 (int16x4_t __p0)
 
__ai uint8x16_t vcltq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vcltq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8_t vcltq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai uint8x16_t vcltq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai uint32x4_t vcltq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai uint32x4_t vcltq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai uint16x8_t vcltq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vclt_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vclt_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vclt_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai uint8x8_t vclt_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai uint32x2_t vclt_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai uint32x2_t vclt_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai uint16x4_t vclt_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x16_t vclzq_u8 (uint8x16_t __p0)
 
__ai uint32x4_t vclzq_u32 (uint32x4_t __p0)
 
__ai uint16x8_t vclzq_u16 (uint16x8_t __p0)
 
__ai int8x16_t vclzq_s8 (int8x16_t __p0)
 
__ai int32x4_t vclzq_s32 (int32x4_t __p0)
 
__ai int16x8_t vclzq_s16 (int16x8_t __p0)
 
__ai uint8x8_t vclz_u8 (uint8x8_t __p0)
 
__ai uint32x2_t vclz_u32 (uint32x2_t __p0)
 
__ai uint16x4_t vclz_u16 (uint16x4_t __p0)
 
__ai int8x8_t vclz_s8 (int8x8_t __p0)
 
__ai int32x2_t vclz_s32 (int32x2_t __p0)
 
__ai int16x4_t vclz_s16 (int16x4_t __p0)
 
__ai poly8x8_t vcnt_p8 (poly8x8_t __p0)
 
__ai poly8x16_t vcntq_p8 (poly8x16_t __p0)
 
__ai uint8x16_t vcntq_u8 (uint8x16_t __p0)
 
__ai int8x16_t vcntq_s8 (int8x16_t __p0)
 
__ai uint8x8_t vcnt_u8 (uint8x8_t __p0)
 
__ai int8x8_t vcnt_s8 (int8x8_t __p0)
 
__ai poly8x16_t vcombine_p8 (poly8x8_t __p0, poly8x8_t __p1)
 
__ai poly16x8_t vcombine_p16 (poly16x4_t __p0, poly16x4_t __p1)
 
__ai uint8x16_t vcombine_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint8x16_t __noswap_vcombine_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x4_t vcombine_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint32x4_t __noswap_vcombine_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint64x2_t vcombine_u64 (uint64x1_t __p0, uint64x1_t __p1)
 
__ai uint16x8_t vcombine_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai uint16x8_t __noswap_vcombine_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x16_t vcombine_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int8x16_t __noswap_vcombine_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai float32x4_t vcombine_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai float32x4_t __noswap_vcombine_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai float16x8_t vcombine_f16 (float16x4_t __p0, float16x4_t __p1)
 
__ai float16x8_t __noswap_vcombine_f16 (float16x4_t __p0, float16x4_t __p1)
 
__ai int32x4_t vcombine_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int32x4_t __noswap_vcombine_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x2_t vcombine_s64 (int64x1_t __p0, int64x1_t __p1)
 
__ai int16x8_t vcombine_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int16x8_t __noswap_vcombine_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai poly8x8_t vcreate_p8 (uint64_t __p0)
 
__ai poly16x4_t vcreate_p16 (uint64_t __p0)
 
__ai uint8x8_t vcreate_u8 (uint64_t __p0)
 
__ai uint32x2_t vcreate_u32 (uint64_t __p0)
 
__ai uint64x1_t vcreate_u64 (uint64_t __p0)
 
__ai uint16x4_t vcreate_u16 (uint64_t __p0)
 
__ai int8x8_t vcreate_s8 (uint64_t __p0)
 
__ai float32x2_t vcreate_f32 (uint64_t __p0)
 
__ai float16x4_t vcreate_f16 (uint64_t __p0)
 
__ai int32x2_t vcreate_s32 (uint64_t __p0)
 
__ai int64x1_t vcreate_s64 (uint64_t __p0)
 
__ai int16x4_t vcreate_s16 (uint64_t __p0)
 
__ai float16x4_t vcvt_f16_f32 (float32x4_t __p0)
 
__ai float16x4_t __noswap_vcvt_f16_f32 (float32x4_t __p0)
 
__ai float32x4_t vcvtq_f32_u32 (uint32x4_t __p0)
 
__ai float32x4_t vcvtq_f32_s32 (int32x4_t __p0)
 
__ai float32x2_t vcvt_f32_u32 (uint32x2_t __p0)
 
__ai float32x2_t vcvt_f32_s32 (int32x2_t __p0)
 
__ai float32x4_t vcvt_f32_f16 (float16x4_t __p0)
 
__ai float32x4_t __noswap_vcvt_f32_f16 (float16x4_t __p0)
 
__ai int32x4_t vcvtq_s32_f32 (float32x4_t __p0)
 
__ai int32x2_t vcvt_s32_f32 (float32x2_t __p0)
 
__ai uint32x4_t vcvtq_u32_f32 (float32x4_t __p0)
 
__ai uint32x2_t vcvt_u32_f32 (float32x2_t __p0)
 
__ai poly8x8_t vdup_n_p8 (poly8_t __p0)
 
__ai poly16x4_t vdup_n_p16 (poly16_t __p0)
 
__ai poly8x16_t vdupq_n_p8 (poly8_t __p0)
 
__ai poly16x8_t vdupq_n_p16 (poly16_t __p0)
 
__ai uint8x16_t vdupq_n_u8 (uint8_t __p0)
 
__ai uint32x4_t vdupq_n_u32 (uint32_t __p0)
 
__ai uint64x2_t vdupq_n_u64 (uint64_t __p0)
 
__ai uint16x8_t vdupq_n_u16 (uint16_t __p0)
 
__ai int8x16_t vdupq_n_s8 (int8_t __p0)
 
__ai float32x4_t vdupq_n_f32 (float32_t __p0)
 
__ai int32x4_t vdupq_n_s32 (int32_t __p0)
 
__ai int64x2_t vdupq_n_s64 (int64_t __p0)
 
__ai int16x8_t vdupq_n_s16 (int16_t __p0)
 
__ai uint8x8_t vdup_n_u8 (uint8_t __p0)
 
__ai uint32x2_t vdup_n_u32 (uint32_t __p0)
 
__ai uint64x1_t vdup_n_u64 (uint64_t __p0)
 
__ai uint16x4_t vdup_n_u16 (uint16_t __p0)
 
__ai int8x8_t vdup_n_s8 (int8_t __p0)
 
__ai float32x2_t vdup_n_f32 (float32_t __p0)
 
__ai int32x2_t vdup_n_s32 (int32_t __p0)
 
__ai int64x1_t vdup_n_s64 (int64_t __p0)
 
__ai int16x4_t vdup_n_s16 (int16_t __p0)
 
__ai uint8x16_t veorq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t veorq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint64x2_t veorq_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint16x8_t veorq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t veorq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int32x4_t veorq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int64x2_t veorq_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int16x8_t veorq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t veor_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t veor_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint64x1_t veor_u64 (uint64x1_t __p0, uint64x1_t __p1)
 
__ai uint16x4_t veor_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t veor_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int32x2_t veor_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x1_t veor_s64 (int64x1_t __p0, int64x1_t __p1)
 
__ai int16x4_t veor_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai float32x4_t vfmaq_f32 (float32x4_t __p0, float32x4_t __p1, float32x4_t __p2)
 
__ai float32x4_t __noswap_vfmaq_f32 (float32x4_t __p0, float32x4_t __p1, float32x4_t __p2)
 
__ai float32x2_t vfma_f32 (float32x2_t __p0, float32x2_t __p1, float32x2_t __p2)
 
__ai float32x2_t __noswap_vfma_f32 (float32x2_t __p0, float32x2_t __p1, float32x2_t __p2)
 
__ai poly8x8_t vget_high_p8 (poly8x16_t __p0)
 
__ai poly8x8_t __noswap_vget_high_p8 (poly8x16_t __p0)
 
__ai poly16x4_t vget_high_p16 (poly16x8_t __p0)
 
__ai uint8x8_t vget_high_u8 (uint8x16_t __p0)
 
__ai uint8x8_t __noswap_vget_high_u8 (uint8x16_t __p0)
 
__ai uint32x2_t vget_high_u32 (uint32x4_t __p0)
 
__ai uint32x2_t __noswap_vget_high_u32 (uint32x4_t __p0)
 
__ai uint64x1_t vget_high_u64 (uint64x2_t __p0)
 
__ai uint16x4_t vget_high_u16 (uint16x8_t __p0)
 
__ai uint16x4_t __noswap_vget_high_u16 (uint16x8_t __p0)
 
__ai int8x8_t vget_high_s8 (int8x16_t __p0)
 
__ai int8x8_t __noswap_vget_high_s8 (int8x16_t __p0)
 
__ai float32x2_t vget_high_f32 (float32x4_t __p0)
 
__ai float32x2_t __noswap_vget_high_f32 (float32x4_t __p0)
 
__ai float16x4_t vget_high_f16 (float16x8_t __p0)
 
__ai float16x4_t __noswap_vget_high_f16 (float16x8_t __p0)
 
__ai int32x2_t vget_high_s32 (int32x4_t __p0)
 
__ai int32x2_t __noswap_vget_high_s32 (int32x4_t __p0)
 
__ai int64x1_t vget_high_s64 (int64x2_t __p0)
 
__ai int16x4_t vget_high_s16 (int16x8_t __p0)
 
__ai int16x4_t __noswap_vget_high_s16 (int16x8_t __p0)
 
__ai poly8x8_t vget_low_p8 (poly8x16_t __p0)
 
__ai poly16x4_t vget_low_p16 (poly16x8_t __p0)
 
__ai uint8x8_t vget_low_u8 (uint8x16_t __p0)
 
__ai uint32x2_t vget_low_u32 (uint32x4_t __p0)
 
__ai uint64x1_t vget_low_u64 (uint64x2_t __p0)
 
__ai uint16x4_t vget_low_u16 (uint16x8_t __p0)
 
__ai int8x8_t vget_low_s8 (int8x16_t __p0)
 
__ai float32x2_t vget_low_f32 (float32x4_t __p0)
 
__ai float16x4_t vget_low_f16 (float16x8_t __p0)
 
__ai int32x2_t vget_low_s32 (int32x4_t __p0)
 
__ai int64x1_t vget_low_s64 (int64x2_t __p0)
 
__ai int16x4_t vget_low_s16 (int16x8_t __p0)
 
__ai uint8x16_t vhaddq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vhaddq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8_t vhaddq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vhaddq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int32x4_t vhaddq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x8_t vhaddq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vhadd_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vhadd_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vhadd_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vhadd_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int32x2_t vhadd_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4_t vhadd_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x16_t vhsubq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vhsubq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8_t vhsubq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vhsubq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int32x4_t vhsubq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x8_t vhsubq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vhsub_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vhsub_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vhsub_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vhsub_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int32x2_t vhsub_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4_t vhsub_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x16_t vmaxq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vmaxq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8_t vmaxq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vmaxq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai float32x4_t vmaxq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai int32x4_t vmaxq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x8_t vmaxq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vmax_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vmax_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vmax_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vmax_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai float32x2_t vmax_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai int32x2_t vmax_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4_t vmax_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x16_t vminq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vminq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8_t vminq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vminq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai float32x4_t vminq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai int32x4_t vminq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x8_t vminq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vmin_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vmin_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vmin_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vmin_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai float32x2_t vmin_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai int32x2_t vmin_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4_t vmin_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x16_t vmlaq_u8 (uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2)
 
__ai uint32x4_t vmlaq_u32 (uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2)
 
__ai uint16x8_t vmlaq_u16 (uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2)
 
__ai int8x16_t vmlaq_s8 (int8x16_t __p0, int8x16_t __p1, int8x16_t __p2)
 
__ai float32x4_t vmlaq_f32 (float32x4_t __p0, float32x4_t __p1, float32x4_t __p2)
 
__ai int32x4_t vmlaq_s32 (int32x4_t __p0, int32x4_t __p1, int32x4_t __p2)
 
__ai int16x8_t vmlaq_s16 (int16x8_t __p0, int16x8_t __p1, int16x8_t __p2)
 
__ai uint8x8_t vmla_u8 (uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2)
 
__ai uint32x2_t vmla_u32 (uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2)
 
__ai uint16x4_t vmla_u16 (uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2)
 
__ai int8x8_t vmla_s8 (int8x8_t __p0, int8x8_t __p1, int8x8_t __p2)
 
__ai float32x2_t vmla_f32 (float32x2_t __p0, float32x2_t __p1, float32x2_t __p2)
 
__ai int32x2_t vmla_s32 (int32x2_t __p0, int32x2_t __p1, int32x2_t __p2)
 
__ai int16x4_t vmla_s16 (int16x4_t __p0, int16x4_t __p1, int16x4_t __p2)
 
__ai uint32x4_t vmlaq_n_u32 (uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2)
 
__ai uint16x8_t vmlaq_n_u16 (uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2)
 
__ai float32x4_t vmlaq_n_f32 (float32x4_t __p0, float32x4_t __p1, float32_t __p2)
 
__ai int32x4_t vmlaq_n_s32 (int32x4_t __p0, int32x4_t __p1, int32_t __p2)
 
__ai int16x8_t vmlaq_n_s16 (int16x8_t __p0, int16x8_t __p1, int16_t __p2)
 
__ai uint32x2_t vmla_n_u32 (uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2)
 
__ai uint16x4_t vmla_n_u16 (uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2)
 
__ai float32x2_t vmla_n_f32 (float32x2_t __p0, float32x2_t __p1, float32_t __p2)
 
__ai int32x2_t vmla_n_s32 (int32x2_t __p0, int32x2_t __p1, int32_t __p2)
 
__ai int16x4_t vmla_n_s16 (int16x4_t __p0, int16x4_t __p1, int16_t __p2)
 
__ai uint8x16_t vmlsq_u8 (uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2)
 
__ai uint32x4_t vmlsq_u32 (uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2)
 
__ai uint16x8_t vmlsq_u16 (uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2)
 
__ai int8x16_t vmlsq_s8 (int8x16_t __p0, int8x16_t __p1, int8x16_t __p2)
 
__ai float32x4_t vmlsq_f32 (float32x4_t __p0, float32x4_t __p1, float32x4_t __p2)
 
__ai int32x4_t vmlsq_s32 (int32x4_t __p0, int32x4_t __p1, int32x4_t __p2)
 
__ai int16x8_t vmlsq_s16 (int16x8_t __p0, int16x8_t __p1, int16x8_t __p2)
 
__ai uint8x8_t vmls_u8 (uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2)
 
__ai uint32x2_t vmls_u32 (uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2)
 
__ai uint16x4_t vmls_u16 (uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2)
 
__ai int8x8_t vmls_s8 (int8x8_t __p0, int8x8_t __p1, int8x8_t __p2)
 
__ai float32x2_t vmls_f32 (float32x2_t __p0, float32x2_t __p1, float32x2_t __p2)
 
__ai int32x2_t vmls_s32 (int32x2_t __p0, int32x2_t __p1, int32x2_t __p2)
 
__ai int16x4_t vmls_s16 (int16x4_t __p0, int16x4_t __p1, int16x4_t __p2)
 
__ai uint32x4_t vmlsq_n_u32 (uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2)
 
__ai uint16x8_t vmlsq_n_u16 (uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2)
 
__ai float32x4_t vmlsq_n_f32 (float32x4_t __p0, float32x4_t __p1, float32_t __p2)
 
__ai int32x4_t vmlsq_n_s32 (int32x4_t __p0, int32x4_t __p1, int32_t __p2)
 
__ai int16x8_t vmlsq_n_s16 (int16x8_t __p0, int16x8_t __p1, int16_t __p2)
 
__ai uint32x2_t vmls_n_u32 (uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2)
 
__ai uint16x4_t vmls_n_u16 (uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2)
 
__ai float32x2_t vmls_n_f32 (float32x2_t __p0, float32x2_t __p1, float32_t __p2)
 
__ai int32x2_t vmls_n_s32 (int32x2_t __p0, int32x2_t __p1, int32_t __p2)
 
__ai int16x4_t vmls_n_s16 (int16x4_t __p0, int16x4_t __p1, int16_t __p2)
 
__ai poly8x8_t vmov_n_p8 (poly8_t __p0)
 
__ai poly16x4_t vmov_n_p16 (poly16_t __p0)
 
__ai poly8x16_t vmovq_n_p8 (poly8_t __p0)
 
__ai poly16x8_t vmovq_n_p16 (poly16_t __p0)
 
__ai uint8x16_t vmovq_n_u8 (uint8_t __p0)
 
__ai uint32x4_t vmovq_n_u32 (uint32_t __p0)
 
__ai uint64x2_t vmovq_n_u64 (uint64_t __p0)
 
__ai uint16x8_t vmovq_n_u16 (uint16_t __p0)
 
__ai int8x16_t vmovq_n_s8 (int8_t __p0)
 
__ai float32x4_t vmovq_n_f32 (float32_t __p0)
 
__ai int32x4_t vmovq_n_s32 (int32_t __p0)
 
__ai int64x2_t vmovq_n_s64 (int64_t __p0)
 
__ai int16x8_t vmovq_n_s16 (int16_t __p0)
 
__ai uint8x8_t vmov_n_u8 (uint8_t __p0)
 
__ai uint32x2_t vmov_n_u32 (uint32_t __p0)
 
__ai uint64x1_t vmov_n_u64 (uint64_t __p0)
 
__ai uint16x4_t vmov_n_u16 (uint16_t __p0)
 
__ai int8x8_t vmov_n_s8 (int8_t __p0)
 
__ai float32x2_t vmov_n_f32 (float32_t __p0)
 
__ai int32x2_t vmov_n_s32 (int32_t __p0)
 
__ai int64x1_t vmov_n_s64 (int64_t __p0)
 
__ai int16x4_t vmov_n_s16 (int16_t __p0)
 
__ai uint16x8_t vmovl_u8 (uint8x8_t __p0)
 
__ai uint16x8_t __noswap_vmovl_u8 (uint8x8_t __p0)
 
__ai uint64x2_t vmovl_u32 (uint32x2_t __p0)
 
__ai uint64x2_t __noswap_vmovl_u32 (uint32x2_t __p0)
 
__ai uint32x4_t vmovl_u16 (uint16x4_t __p0)
 
__ai uint32x4_t __noswap_vmovl_u16 (uint16x4_t __p0)
 
__ai int16x8_t vmovl_s8 (int8x8_t __p0)
 
__ai int16x8_t __noswap_vmovl_s8 (int8x8_t __p0)
 
__ai int64x2_t vmovl_s32 (int32x2_t __p0)
 
__ai int64x2_t __noswap_vmovl_s32 (int32x2_t __p0)
 
__ai int32x4_t vmovl_s16 (int16x4_t __p0)
 
__ai int32x4_t __noswap_vmovl_s16 (int16x4_t __p0)
 
__ai uint16x4_t vmovn_u32 (uint32x4_t __p0)
 
__ai uint16x4_t __noswap_vmovn_u32 (uint32x4_t __p0)
 
__ai uint32x2_t vmovn_u64 (uint64x2_t __p0)
 
__ai uint32x2_t __noswap_vmovn_u64 (uint64x2_t __p0)
 
__ai uint8x8_t vmovn_u16 (uint16x8_t __p0)
 
__ai uint8x8_t __noswap_vmovn_u16 (uint16x8_t __p0)
 
__ai int16x4_t vmovn_s32 (int32x4_t __p0)
 
__ai int16x4_t __noswap_vmovn_s32 (int32x4_t __p0)
 
__ai int32x2_t vmovn_s64 (int64x2_t __p0)
 
__ai int32x2_t __noswap_vmovn_s64 (int64x2_t __p0)
 
__ai int8x8_t vmovn_s16 (int16x8_t __p0)
 
__ai int8x8_t __noswap_vmovn_s16 (int16x8_t __p0)
 
__ai uint8x16_t vmulq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vmulq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8_t vmulq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vmulq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai float32x4_t vmulq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai int32x4_t vmulq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x8_t vmulq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vmul_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vmul_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vmul_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vmul_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai float32x2_t vmul_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai int32x2_t vmul_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4_t vmul_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai poly8x8_t vmul_p8 (poly8x8_t __p0, poly8x8_t __p1)
 
__ai poly8x16_t vmulq_p8 (poly8x16_t __p0, poly8x16_t __p1)
 
__ai uint32x4_t vmulq_n_u32 (uint32x4_t __p0, uint32_t __p1)
 
__ai uint16x8_t vmulq_n_u16 (uint16x8_t __p0, uint16_t __p1)
 
__ai float32x4_t vmulq_n_f32 (float32x4_t __p0, float32_t __p1)
 
__ai int32x4_t vmulq_n_s32 (int32x4_t __p0, int32_t __p1)
 
__ai int16x8_t vmulq_n_s16 (int16x8_t __p0, int16_t __p1)
 
__ai uint32x2_t vmul_n_u32 (uint32x2_t __p0, uint32_t __p1)
 
__ai uint16x4_t vmul_n_u16 (uint16x4_t __p0, uint16_t __p1)
 
__ai float32x2_t vmul_n_f32 (float32x2_t __p0, float32_t __p1)
 
__ai int32x2_t vmul_n_s32 (int32x2_t __p0, int32_t __p1)
 
__ai int16x4_t vmul_n_s16 (int16x4_t __p0, int16_t __p1)
 
__ai poly16x8_t vmull_p8 (poly8x8_t __p0, poly8x8_t __p1)
 
__ai poly16x8_t __noswap_vmull_p8 (poly8x8_t __p0, poly8x8_t __p1)
 
__ai uint16x8_t vmull_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint16x8_t __noswap_vmull_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint64x2_t vmull_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint64x2_t __noswap_vmull_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint32x4_t vmull_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai uint32x4_t __noswap_vmull_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int16x8_t vmull_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int16x8_t __noswap_vmull_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int64x2_t vmull_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x2_t __noswap_vmull_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int32x4_t vmull_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int32x4_t __noswap_vmull_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint64x2_t vmull_n_u32 (uint32x2_t __p0, uint32_t __p1)
 
__ai uint64x2_t __noswap_vmull_n_u32 (uint32x2_t __p0, uint32_t __p1)
 
__ai uint32x4_t vmull_n_u16 (uint16x4_t __p0, uint16_t __p1)
 
__ai uint32x4_t __noswap_vmull_n_u16 (uint16x4_t __p0, uint16_t __p1)
 
__ai int64x2_t vmull_n_s32 (int32x2_t __p0, int32_t __p1)
 
__ai int64x2_t __noswap_vmull_n_s32 (int32x2_t __p0, int32_t __p1)
 
__ai int32x4_t vmull_n_s16 (int16x4_t __p0, int16_t __p1)
 
__ai int32x4_t __noswap_vmull_n_s16 (int16x4_t __p0, int16_t __p1)
 
__ai poly8x8_t vmvn_p8 (poly8x8_t __p0)
 
__ai poly8x16_t vmvnq_p8 (poly8x16_t __p0)
 
__ai uint8x16_t vmvnq_u8 (uint8x16_t __p0)
 
__ai uint32x4_t vmvnq_u32 (uint32x4_t __p0)
 
__ai uint16x8_t vmvnq_u16 (uint16x8_t __p0)
 
__ai int8x16_t vmvnq_s8 (int8x16_t __p0)
 
__ai int32x4_t vmvnq_s32 (int32x4_t __p0)
 
__ai int16x8_t vmvnq_s16 (int16x8_t __p0)
 
__ai uint8x8_t vmvn_u8 (uint8x8_t __p0)
 
__ai uint32x2_t vmvn_u32 (uint32x2_t __p0)
 
__ai uint16x4_t vmvn_u16 (uint16x4_t __p0)
 
__ai int8x8_t vmvn_s8 (int8x8_t __p0)
 
__ai int32x2_t vmvn_s32 (int32x2_t __p0)
 
__ai int16x4_t vmvn_s16 (int16x4_t __p0)
 
__ai int8x16_t vnegq_s8 (int8x16_t __p0)
 
__ai float32x4_t vnegq_f32 (float32x4_t __p0)
 
__ai int32x4_t vnegq_s32 (int32x4_t __p0)
 
__ai int16x8_t vnegq_s16 (int16x8_t __p0)
 
__ai int8x8_t vneg_s8 (int8x8_t __p0)
 
__ai float32x2_t vneg_f32 (float32x2_t __p0)
 
__ai int32x2_t vneg_s32 (int32x2_t __p0)
 
__ai int16x4_t vneg_s16 (int16x4_t __p0)
 
__ai uint8x16_t vornq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vornq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint64x2_t vornq_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint16x8_t vornq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vornq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int32x4_t vornq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int64x2_t vornq_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int16x8_t vornq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vorn_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vorn_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint64x1_t vorn_u64 (uint64x1_t __p0, uint64x1_t __p1)
 
__ai uint16x4_t vorn_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vorn_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int32x2_t vorn_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x1_t vorn_s64 (int64x1_t __p0, int64x1_t __p1)
 
__ai int16x4_t vorn_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x16_t vorrq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vorrq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint64x2_t vorrq_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint16x8_t vorrq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vorrq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int32x4_t vorrq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int64x2_t vorrq_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int16x8_t vorrq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vorr_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vorr_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint64x1_t vorr_u64 (uint64x1_t __p0, uint64x1_t __p1)
 
__ai uint16x4_t vorr_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vorr_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int32x2_t vorr_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x1_t vorr_s64 (int64x1_t __p0, int64x1_t __p1)
 
__ai int16x4_t vorr_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint16x8_t vpadalq_u8 (uint16x8_t __p0, uint8x16_t __p1)
 
__ai uint64x2_t vpadalq_u32 (uint64x2_t __p0, uint32x4_t __p1)
 
__ai uint32x4_t vpadalq_u16 (uint32x4_t __p0, uint16x8_t __p1)
 
__ai int16x8_t vpadalq_s8 (int16x8_t __p0, int8x16_t __p1)
 
__ai int64x2_t vpadalq_s32 (int64x2_t __p0, int32x4_t __p1)
 
__ai int32x4_t vpadalq_s16 (int32x4_t __p0, int16x8_t __p1)
 
__ai uint16x4_t vpadal_u8 (uint16x4_t __p0, uint8x8_t __p1)
 
__ai uint64x1_t vpadal_u32 (uint64x1_t __p0, uint32x2_t __p1)
 
__ai uint32x2_t vpadal_u16 (uint32x2_t __p0, uint16x4_t __p1)
 
__ai int16x4_t vpadal_s8 (int16x4_t __p0, int8x8_t __p1)
 
__ai int64x1_t vpadal_s32 (int64x1_t __p0, int32x2_t __p1)
 
__ai int32x2_t vpadal_s16 (int32x2_t __p0, int16x4_t __p1)
 
__ai uint8x8_t vpadd_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vpadd_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vpadd_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vpadd_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai float32x2_t vpadd_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai int32x2_t vpadd_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4_t vpadd_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint16x8_t vpaddlq_u8 (uint8x16_t __p0)
 
__ai uint64x2_t vpaddlq_u32 (uint32x4_t __p0)
 
__ai uint32x4_t vpaddlq_u16 (uint16x8_t __p0)
 
__ai int16x8_t vpaddlq_s8 (int8x16_t __p0)
 
__ai int64x2_t vpaddlq_s32 (int32x4_t __p0)
 
__ai int32x4_t vpaddlq_s16 (int16x8_t __p0)
 
__ai uint16x4_t vpaddl_u8 (uint8x8_t __p0)
 
__ai uint64x1_t vpaddl_u32 (uint32x2_t __p0)
 
__ai uint32x2_t vpaddl_u16 (uint16x4_t __p0)
 
__ai int16x4_t vpaddl_s8 (int8x8_t __p0)
 
__ai int64x1_t vpaddl_s32 (int32x2_t __p0)
 
__ai int32x2_t vpaddl_s16 (int16x4_t __p0)
 
__ai uint8x8_t vpmax_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vpmax_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vpmax_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vpmax_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai float32x2_t vpmax_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai int32x2_t vpmax_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4_t vpmax_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x8_t vpmin_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vpmin_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vpmin_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vpmin_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai float32x2_t vpmin_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai int32x2_t vpmin_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4_t vpmin_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int8x16_t vqabsq_s8 (int8x16_t __p0)
 
__ai int32x4_t vqabsq_s32 (int32x4_t __p0)
 
__ai int16x8_t vqabsq_s16 (int16x8_t __p0)
 
__ai int8x8_t vqabs_s8 (int8x8_t __p0)
 
__ai int32x2_t vqabs_s32 (int32x2_t __p0)
 
__ai int16x4_t vqabs_s16 (int16x4_t __p0)
 
__ai uint8x16_t vqaddq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vqaddq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint64x2_t vqaddq_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint16x8_t vqaddq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vqaddq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int32x4_t vqaddq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int32x4_t __noswap_vqaddq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int64x2_t vqaddq_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int16x8_t vqaddq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai int16x8_t __noswap_vqaddq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vqadd_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vqadd_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint64x1_t vqadd_u64 (uint64x1_t __p0, uint64x1_t __p1)
 
__ai uint16x4_t vqadd_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vqadd_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int32x2_t vqadd_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int32x2_t __noswap_vqadd_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x1_t vqadd_s64 (int64x1_t __p0, int64x1_t __p1)
 
__ai int16x4_t vqadd_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int16x4_t __noswap_vqadd_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int64x2_t vqdmlal_s32 (int64x2_t __p0, int32x2_t __p1, int32x2_t __p2)
 
__ai int64x2_t __noswap_vqdmlal_s32 (int64x2_t __p0, int32x2_t __p1, int32x2_t __p2)
 
__ai int32x4_t vqdmlal_s16 (int32x4_t __p0, int16x4_t __p1, int16x4_t __p2)
 
__ai int32x4_t __noswap_vqdmlal_s16 (int32x4_t __p0, int16x4_t __p1, int16x4_t __p2)
 
__ai int64x2_t vqdmlal_n_s32 (int64x2_t __p0, int32x2_t __p1, int32_t __p2)
 
__ai int64x2_t __noswap_vqdmlal_n_s32 (int64x2_t __p0, int32x2_t __p1, int32_t __p2)
 
__ai int32x4_t vqdmlal_n_s16 (int32x4_t __p0, int16x4_t __p1, int16_t __p2)
 
__ai int32x4_t __noswap_vqdmlal_n_s16 (int32x4_t __p0, int16x4_t __p1, int16_t __p2)
 
__ai int64x2_t vqdmlsl_s32 (int64x2_t __p0, int32x2_t __p1, int32x2_t __p2)
 
__ai int64x2_t __noswap_vqdmlsl_s32 (int64x2_t __p0, int32x2_t __p1, int32x2_t __p2)
 
__ai int32x4_t vqdmlsl_s16 (int32x4_t __p0, int16x4_t __p1, int16x4_t __p2)
 
__ai int32x4_t __noswap_vqdmlsl_s16 (int32x4_t __p0, int16x4_t __p1, int16x4_t __p2)
 
__ai int64x2_t vqdmlsl_n_s32 (int64x2_t __p0, int32x2_t __p1, int32_t __p2)
 
__ai int64x2_t __noswap_vqdmlsl_n_s32 (int64x2_t __p0, int32x2_t __p1, int32_t __p2)
 
__ai int32x4_t vqdmlsl_n_s16 (int32x4_t __p0, int16x4_t __p1, int16_t __p2)
 
__ai int32x4_t __noswap_vqdmlsl_n_s16 (int32x4_t __p0, int16x4_t __p1, int16_t __p2)
 
__ai int32x4_t vqdmulhq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int32x4_t __noswap_vqdmulhq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x8_t vqdmulhq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai int16x8_t __noswap_vqdmulhq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai int32x2_t vqdmulh_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int32x2_t __noswap_vqdmulh_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4_t vqdmulh_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int16x4_t __noswap_vqdmulh_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int32x4_t vqdmulhq_n_s32 (int32x4_t __p0, int32_t __p1)
 
__ai int16x8_t vqdmulhq_n_s16 (int16x8_t __p0, int16_t __p1)
 
__ai int32x2_t vqdmulh_n_s32 (int32x2_t __p0, int32_t __p1)
 
__ai int16x4_t vqdmulh_n_s16 (int16x4_t __p0, int16_t __p1)
 
__ai int64x2_t vqdmull_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x2_t __noswap_vqdmull_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int32x4_t vqdmull_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int32x4_t __noswap_vqdmull_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int64x2_t vqdmull_n_s32 (int32x2_t __p0, int32_t __p1)
 
__ai int64x2_t __noswap_vqdmull_n_s32 (int32x2_t __p0, int32_t __p1)
 
__ai int32x4_t vqdmull_n_s16 (int16x4_t __p0, int16_t __p1)
 
__ai int32x4_t __noswap_vqdmull_n_s16 (int16x4_t __p0, int16_t __p1)
 
__ai uint16x4_t vqmovn_u32 (uint32x4_t __p0)
 
__ai uint16x4_t __noswap_vqmovn_u32 (uint32x4_t __p0)
 
__ai uint32x2_t vqmovn_u64 (uint64x2_t __p0)
 
__ai uint32x2_t __noswap_vqmovn_u64 (uint64x2_t __p0)
 
__ai uint8x8_t vqmovn_u16 (uint16x8_t __p0)
 
__ai uint8x8_t __noswap_vqmovn_u16 (uint16x8_t __p0)
 
__ai int16x4_t vqmovn_s32 (int32x4_t __p0)
 
__ai int16x4_t __noswap_vqmovn_s32 (int32x4_t __p0)
 
__ai int32x2_t vqmovn_s64 (int64x2_t __p0)
 
__ai int32x2_t __noswap_vqmovn_s64 (int64x2_t __p0)
 
__ai int8x8_t vqmovn_s16 (int16x8_t __p0)
 
__ai int8x8_t __noswap_vqmovn_s16 (int16x8_t __p0)
 
__ai uint16x4_t vqmovun_s32 (int32x4_t __p0)
 
__ai uint16x4_t __noswap_vqmovun_s32 (int32x4_t __p0)
 
__ai uint32x2_t vqmovun_s64 (int64x2_t __p0)
 
__ai uint32x2_t __noswap_vqmovun_s64 (int64x2_t __p0)
 
__ai uint8x8_t vqmovun_s16 (int16x8_t __p0)
 
__ai uint8x8_t __noswap_vqmovun_s16 (int16x8_t __p0)
 
__ai int8x16_t vqnegq_s8 (int8x16_t __p0)
 
__ai int32x4_t vqnegq_s32 (int32x4_t __p0)
 
__ai int16x8_t vqnegq_s16 (int16x8_t __p0)
 
__ai int8x8_t vqneg_s8 (int8x8_t __p0)
 
__ai int32x2_t vqneg_s32 (int32x2_t __p0)
 
__ai int16x4_t vqneg_s16 (int16x4_t __p0)
 
__ai int32x4_t vqrdmulhq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int32x4_t __noswap_vqrdmulhq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x8_t vqrdmulhq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai int16x8_t __noswap_vqrdmulhq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai int32x2_t vqrdmulh_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int32x2_t __noswap_vqrdmulh_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4_t vqrdmulh_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int16x4_t __noswap_vqrdmulh_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int32x4_t vqrdmulhq_n_s32 (int32x4_t __p0, int32_t __p1)
 
__ai int16x8_t vqrdmulhq_n_s16 (int16x8_t __p0, int16_t __p1)
 
__ai int32x2_t vqrdmulh_n_s32 (int32x2_t __p0, int32_t __p1)
 
__ai int16x4_t vqrdmulh_n_s16 (int16x4_t __p0, int16_t __p1)
 
__ai uint8x16_t vqrshlq_u8 (uint8x16_t __p0, int8x16_t __p1)
 
__ai uint32x4_t vqrshlq_u32 (uint32x4_t __p0, int32x4_t __p1)
 
__ai uint64x2_t vqrshlq_u64 (uint64x2_t __p0, int64x2_t __p1)
 
__ai uint16x8_t vqrshlq_u16 (uint16x8_t __p0, int16x8_t __p1)
 
__ai int8x16_t vqrshlq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int32x4_t vqrshlq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int64x2_t vqrshlq_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int16x8_t vqrshlq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vqrshl_u8 (uint8x8_t __p0, int8x8_t __p1)
 
__ai uint32x2_t vqrshl_u32 (uint32x2_t __p0, int32x2_t __p1)
 
__ai uint64x1_t vqrshl_u64 (uint64x1_t __p0, int64x1_t __p1)
 
__ai uint16x4_t vqrshl_u16 (uint16x4_t __p0, int16x4_t __p1)
 
__ai int8x8_t vqrshl_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int32x2_t vqrshl_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x1_t vqrshl_s64 (int64x1_t __p0, int64x1_t __p1)
 
__ai int16x4_t vqrshl_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x16_t vqshlq_u8 (uint8x16_t __p0, int8x16_t __p1)
 
__ai uint32x4_t vqshlq_u32 (uint32x4_t __p0, int32x4_t __p1)
 
__ai uint64x2_t vqshlq_u64 (uint64x2_t __p0, int64x2_t __p1)
 
__ai uint16x8_t vqshlq_u16 (uint16x8_t __p0, int16x8_t __p1)
 
__ai int8x16_t vqshlq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int32x4_t vqshlq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int64x2_t vqshlq_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int16x8_t vqshlq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vqshl_u8 (uint8x8_t __p0, int8x8_t __p1)
 
__ai uint32x2_t vqshl_u32 (uint32x2_t __p0, int32x2_t __p1)
 
__ai uint64x1_t vqshl_u64 (uint64x1_t __p0, int64x1_t __p1)
 
__ai uint16x4_t vqshl_u16 (uint16x4_t __p0, int16x4_t __p1)
 
__ai int8x8_t vqshl_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int32x2_t vqshl_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x1_t vqshl_s64 (int64x1_t __p0, int64x1_t __p1)
 
__ai int16x4_t vqshl_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x16_t vqsubq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vqsubq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint64x2_t vqsubq_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint16x8_t vqsubq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vqsubq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int32x4_t vqsubq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int32x4_t __noswap_vqsubq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int64x2_t vqsubq_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int16x8_t vqsubq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai int16x8_t __noswap_vqsubq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vqsub_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vqsub_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint64x1_t vqsub_u64 (uint64x1_t __p0, uint64x1_t __p1)
 
__ai uint16x4_t vqsub_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vqsub_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int32x2_t vqsub_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int32x2_t __noswap_vqsub_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x1_t vqsub_s64 (int64x1_t __p0, int64x1_t __p1)
 
__ai int16x4_t vqsub_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int16x4_t __noswap_vqsub_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint16x4_t vraddhn_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x4_t __noswap_vraddhn_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint32x2_t vraddhn_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint32x2_t __noswap_vraddhn_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint8x8_t vraddhn_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai uint8x8_t __noswap_vraddhn_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int16x4_t vraddhn_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x4_t __noswap_vraddhn_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int32x2_t vraddhn_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int32x2_t __noswap_vraddhn_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int8x8_t vraddhn_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai int8x8_t __noswap_vraddhn_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint32x4_t vrecpeq_u32 (uint32x4_t __p0)
 
__ai float32x4_t vrecpeq_f32 (float32x4_t __p0)
 
__ai uint32x2_t vrecpe_u32 (uint32x2_t __p0)
 
__ai float32x2_t vrecpe_f32 (float32x2_t __p0)
 
__ai float32x4_t vrecpsq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai float32x2_t vrecps_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai poly8x8_t vrev16_p8 (poly8x8_t __p0)
 
__ai poly8x16_t vrev16q_p8 (poly8x16_t __p0)
 
__ai uint8x16_t vrev16q_u8 (uint8x16_t __p0)
 
__ai int8x16_t vrev16q_s8 (int8x16_t __p0)
 
__ai uint8x8_t vrev16_u8 (uint8x8_t __p0)
 
__ai int8x8_t vrev16_s8 (int8x8_t __p0)
 
__ai poly8x8_t vrev32_p8 (poly8x8_t __p0)
 
__ai poly16x4_t vrev32_p16 (poly16x4_t __p0)
 
__ai poly8x16_t vrev32q_p8 (poly8x16_t __p0)
 
__ai poly16x8_t vrev32q_p16 (poly16x8_t __p0)
 
__ai uint8x16_t vrev32q_u8 (uint8x16_t __p0)
 
__ai uint16x8_t vrev32q_u16 (uint16x8_t __p0)
 
__ai int8x16_t vrev32q_s8 (int8x16_t __p0)
 
__ai int16x8_t vrev32q_s16 (int16x8_t __p0)
 
__ai uint8x8_t vrev32_u8 (uint8x8_t __p0)
 
__ai uint16x4_t vrev32_u16 (uint16x4_t __p0)
 
__ai int8x8_t vrev32_s8 (int8x8_t __p0)
 
__ai int16x4_t vrev32_s16 (int16x4_t __p0)
 
__ai poly8x8_t vrev64_p8 (poly8x8_t __p0)
 
__ai poly16x4_t vrev64_p16 (poly16x4_t __p0)
 
__ai poly8x16_t vrev64q_p8 (poly8x16_t __p0)
 
__ai poly16x8_t vrev64q_p16 (poly16x8_t __p0)
 
__ai uint8x16_t vrev64q_u8 (uint8x16_t __p0)
 
__ai uint32x4_t vrev64q_u32 (uint32x4_t __p0)
 
__ai uint16x8_t vrev64q_u16 (uint16x8_t __p0)
 
__ai int8x16_t vrev64q_s8 (int8x16_t __p0)
 
__ai float32x4_t vrev64q_f32 (float32x4_t __p0)
 
__ai int32x4_t vrev64q_s32 (int32x4_t __p0)
 
__ai int16x8_t vrev64q_s16 (int16x8_t __p0)
 
__ai uint8x8_t vrev64_u8 (uint8x8_t __p0)
 
__ai uint32x2_t vrev64_u32 (uint32x2_t __p0)
 
__ai uint16x4_t vrev64_u16 (uint16x4_t __p0)
 
__ai int8x8_t vrev64_s8 (int8x8_t __p0)
 
__ai float32x2_t vrev64_f32 (float32x2_t __p0)
 
__ai int32x2_t vrev64_s32 (int32x2_t __p0)
 
__ai int16x4_t vrev64_s16 (int16x4_t __p0)
 
__ai uint8x16_t vrhaddq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vrhaddq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8_t vrhaddq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vrhaddq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int32x4_t vrhaddq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x8_t vrhaddq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vrhadd_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vrhadd_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vrhadd_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vrhadd_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int32x2_t vrhadd_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4_t vrhadd_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x16_t vrshlq_u8 (uint8x16_t __p0, int8x16_t __p1)
 
__ai uint32x4_t vrshlq_u32 (uint32x4_t __p0, int32x4_t __p1)
 
__ai uint64x2_t vrshlq_u64 (uint64x2_t __p0, int64x2_t __p1)
 
__ai uint16x8_t vrshlq_u16 (uint16x8_t __p0, int16x8_t __p1)
 
__ai int8x16_t vrshlq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int32x4_t vrshlq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int64x2_t vrshlq_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int16x8_t vrshlq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vrshl_u8 (uint8x8_t __p0, int8x8_t __p1)
 
__ai uint32x2_t vrshl_u32 (uint32x2_t __p0, int32x2_t __p1)
 
__ai uint64x1_t vrshl_u64 (uint64x1_t __p0, int64x1_t __p1)
 
__ai uint16x4_t vrshl_u16 (uint16x4_t __p0, int16x4_t __p1)
 
__ai int8x8_t vrshl_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int32x2_t vrshl_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x1_t vrshl_s64 (int64x1_t __p0, int64x1_t __p1)
 
__ai int16x4_t vrshl_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint32x4_t vrsqrteq_u32 (uint32x4_t __p0)
 
__ai float32x4_t vrsqrteq_f32 (float32x4_t __p0)
 
__ai uint32x2_t vrsqrte_u32 (uint32x2_t __p0)
 
__ai float32x2_t vrsqrte_f32 (float32x2_t __p0)
 
__ai float32x4_t vrsqrtsq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai float32x2_t vrsqrts_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai uint16x4_t vrsubhn_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x4_t __noswap_vrsubhn_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint32x2_t vrsubhn_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint32x2_t __noswap_vrsubhn_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint8x8_t vrsubhn_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai uint8x8_t __noswap_vrsubhn_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int16x4_t vrsubhn_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x4_t __noswap_vrsubhn_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int32x2_t vrsubhn_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int32x2_t __noswap_vrsubhn_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int8x8_t vrsubhn_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai int8x8_t __noswap_vrsubhn_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x16_t vshlq_u8 (uint8x16_t __p0, int8x16_t __p1)
 
__ai uint32x4_t vshlq_u32 (uint32x4_t __p0, int32x4_t __p1)
 
__ai uint64x2_t vshlq_u64 (uint64x2_t __p0, int64x2_t __p1)
 
__ai uint16x8_t vshlq_u16 (uint16x8_t __p0, int16x8_t __p1)
 
__ai int8x16_t vshlq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai int32x4_t vshlq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int64x2_t vshlq_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int16x8_t vshlq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vshl_u8 (uint8x8_t __p0, int8x8_t __p1)
 
__ai uint32x2_t vshl_u32 (uint32x2_t __p0, int32x2_t __p1)
 
__ai uint64x1_t vshl_u64 (uint64x1_t __p0, int64x1_t __p1)
 
__ai uint16x4_t vshl_u16 (uint16x4_t __p0, int16x4_t __p1)
 
__ai int8x8_t vshl_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int32x2_t vshl_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x1_t vshl_s64 (int64x1_t __p0, int64x1_t __p1)
 
__ai int16x4_t vshl_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x16_t vsubq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vsubq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint64x2_t vsubq_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint16x8_t vsubq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16_t vsubq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai float32x4_t vsubq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai int32x4_t vsubq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int64x2_t vsubq_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int16x8_t vsubq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vsub_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vsub_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint64x1_t vsub_u64 (uint64x1_t __p0, uint64x1_t __p1)
 
__ai uint16x4_t vsub_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8_t vsub_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai float32x2_t vsub_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai int32x2_t vsub_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x1_t vsub_s64 (int64x1_t __p0, int64x1_t __p1)
 
__ai int16x4_t vsub_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint16x4_t vsubhn_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x4_t __noswap_vsubhn_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint32x2_t vsubhn_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint32x2_t __noswap_vsubhn_u64 (uint64x2_t __p0, uint64x2_t __p1)
 
__ai uint8x8_t vsubhn_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai uint8x8_t __noswap_vsubhn_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int16x4_t vsubhn_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x4_t __noswap_vsubhn_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int32x2_t vsubhn_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int32x2_t __noswap_vsubhn_s64 (int64x2_t __p0, int64x2_t __p1)
 
__ai int8x8_t vsubhn_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai int8x8_t __noswap_vsubhn_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint16x8_t vsubl_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint64x2_t vsubl_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint32x4_t vsubl_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int16x8_t vsubl_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int64x2_t vsubl_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int32x4_t vsubl_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint16x8_t vsubw_u8 (uint16x8_t __p0, uint8x8_t __p1)
 
__ai uint64x2_t vsubw_u32 (uint64x2_t __p0, uint32x2_t __p1)
 
__ai uint32x4_t vsubw_u16 (uint32x4_t __p0, uint16x4_t __p1)
 
__ai int16x8_t vsubw_s8 (int16x8_t __p0, int8x8_t __p1)
 
__ai int64x2_t vsubw_s32 (int64x2_t __p0, int32x2_t __p1)
 
__ai int32x4_t vsubw_s16 (int32x4_t __p0, int16x4_t __p1)
 
__ai poly8x8_t vtbl1_p8 (poly8x8_t __p0, uint8x8_t __p1)
 
__ai uint8x8_t vtbl1_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai int8x8_t vtbl1_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai poly8x8_t vtbl2_p8 (poly8x8x2_t __p0, uint8x8_t __p1)
 
__ai uint8x8_t vtbl2_u8 (uint8x8x2_t __p0, uint8x8_t __p1)
 
__ai int8x8_t vtbl2_s8 (int8x8x2_t __p0, int8x8_t __p1)
 
__ai poly8x8_t vtbl3_p8 (poly8x8x3_t __p0, uint8x8_t __p1)
 
__ai uint8x8_t vtbl3_u8 (uint8x8x3_t __p0, uint8x8_t __p1)
 
__ai int8x8_t vtbl3_s8 (int8x8x3_t __p0, int8x8_t __p1)
 
__ai poly8x8_t vtbl4_p8 (poly8x8x4_t __p0, uint8x8_t __p1)
 
__ai uint8x8_t vtbl4_u8 (uint8x8x4_t __p0, uint8x8_t __p1)
 
__ai int8x8_t vtbl4_s8 (int8x8x4_t __p0, int8x8_t __p1)
 
__ai poly8x8_t vtbx1_p8 (poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2)
 
__ai uint8x8_t vtbx1_u8 (uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2)
 
__ai int8x8_t vtbx1_s8 (int8x8_t __p0, int8x8_t __p1, int8x8_t __p2)
 
__ai poly8x8_t vtbx2_p8 (poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2)
 
__ai uint8x8_t vtbx2_u8 (uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2)
 
__ai int8x8_t vtbx2_s8 (int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2)
 
__ai poly8x8_t vtbx3_p8 (poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2)
 
__ai uint8x8_t vtbx3_u8 (uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2)
 
__ai int8x8_t vtbx3_s8 (int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2)
 
__ai poly8x8_t vtbx4_p8 (poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2)
 
__ai uint8x8_t vtbx4_u8 (uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2)
 
__ai int8x8_t vtbx4_s8 (int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2)
 
__ai poly8x8x2_t vtrn_p8 (poly8x8_t __p0, poly8x8_t __p1)
 
__ai poly16x4x2_t vtrn_p16 (poly16x4_t __p0, poly16x4_t __p1)
 
__ai poly8x16x2_t vtrnq_p8 (poly8x16_t __p0, poly8x16_t __p1)
 
__ai poly16x8x2_t vtrnq_p16 (poly16x8_t __p0, poly16x8_t __p1)
 
__ai uint8x16x2_t vtrnq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4x2_t vtrnq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8x2_t vtrnq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16x2_t vtrnq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai float32x4x2_t vtrnq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai int32x4x2_t vtrnq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x8x2_t vtrnq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8x2_t vtrn_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2x2_t vtrn_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4x2_t vtrn_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8x2_t vtrn_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai float32x2x2_t vtrn_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai int32x2x2_t vtrn_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4x2_t vtrn_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint8x8_t vtst_p8 (poly8x8_t __p0, poly8x8_t __p1)
 
__ai uint16x4_t vtst_p16 (poly16x4_t __p0, poly16x4_t __p1)
 
__ai uint8x16_t vtstq_p8 (poly8x16_t __p0, poly8x16_t __p1)
 
__ai uint16x8_t vtstq_p16 (poly16x8_t __p0, poly16x8_t __p1)
 
__ai uint8x16_t vtstq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4_t vtstq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8_t vtstq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai uint8x16_t vtstq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai uint32x4_t vtstq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai uint16x8_t vtstq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8_t vtst_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2_t vtst_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4_t vtst_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai uint8x8_t vtst_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai uint32x2_t vtst_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai uint16x4_t vtst_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai poly8x8x2_t vuzp_p8 (poly8x8_t __p0, poly8x8_t __p1)
 
__ai poly16x4x2_t vuzp_p16 (poly16x4_t __p0, poly16x4_t __p1)
 
__ai poly8x16x2_t vuzpq_p8 (poly8x16_t __p0, poly8x16_t __p1)
 
__ai poly16x8x2_t vuzpq_p16 (poly16x8_t __p0, poly16x8_t __p1)
 
__ai uint8x16x2_t vuzpq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4x2_t vuzpq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8x2_t vuzpq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16x2_t vuzpq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai float32x4x2_t vuzpq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai int32x4x2_t vuzpq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x8x2_t vuzpq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8x2_t vuzp_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2x2_t vuzp_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4x2_t vuzp_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8x2_t vuzp_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai float32x2x2_t vuzp_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai int32x2x2_t vuzp_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4x2_t vuzp_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai poly8x8x2_t vzip_p8 (poly8x8_t __p0, poly8x8_t __p1)
 
__ai poly16x4x2_t vzip_p16 (poly16x4_t __p0, poly16x4_t __p1)
 
__ai poly8x16x2_t vzipq_p8 (poly8x16_t __p0, poly8x16_t __p1)
 
__ai poly16x8x2_t vzipq_p16 (poly16x8_t __p0, poly16x8_t __p1)
 
__ai uint8x16x2_t vzipq_u8 (uint8x16_t __p0, uint8x16_t __p1)
 
__ai uint32x4x2_t vzipq_u32 (uint32x4_t __p0, uint32x4_t __p1)
 
__ai uint16x8x2_t vzipq_u16 (uint16x8_t __p0, uint16x8_t __p1)
 
__ai int8x16x2_t vzipq_s8 (int8x16_t __p0, int8x16_t __p1)
 
__ai float32x4x2_t vzipq_f32 (float32x4_t __p0, float32x4_t __p1)
 
__ai int32x4x2_t vzipq_s32 (int32x4_t __p0, int32x4_t __p1)
 
__ai int16x8x2_t vzipq_s16 (int16x8_t __p0, int16x8_t __p1)
 
__ai uint8x8x2_t vzip_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint32x2x2_t vzip_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint16x4x2_t vzip_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int8x8x2_t vzip_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai float32x2x2_t vzip_f32 (float32x2_t __p0, float32x2_t __p1)
 
__ai int32x2x2_t vzip_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int16x4x2_t vzip_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai poly8x8_t vreinterpret_p8_p16 (poly16x4_t __p0)
 
__ai poly8x8_t vreinterpret_p8_u8 (uint8x8_t __p0)
 
__ai poly8x8_t vreinterpret_p8_u32 (uint32x2_t __p0)
 
__ai poly8x8_t vreinterpret_p8_u64 (uint64x1_t __p0)
 
__ai poly8x8_t vreinterpret_p8_u16 (uint16x4_t __p0)
 
__ai poly8x8_t vreinterpret_p8_s8 (int8x8_t __p0)
 
__ai poly8x8_t vreinterpret_p8_f32 (float32x2_t __p0)
 
__ai poly8x8_t vreinterpret_p8_f16 (float16x4_t __p0)
 
__ai poly8x8_t vreinterpret_p8_s32 (int32x2_t __p0)
 
__ai poly8x8_t vreinterpret_p8_s64 (int64x1_t __p0)
 
__ai poly8x8_t vreinterpret_p8_s16 (int16x4_t __p0)
 
__ai poly16x4_t vreinterpret_p16_p8 (poly8x8_t __p0)
 
__ai poly16x4_t vreinterpret_p16_u8 (uint8x8_t __p0)
 
__ai poly16x4_t vreinterpret_p16_u32 (uint32x2_t __p0)
 
__ai poly16x4_t vreinterpret_p16_u64 (uint64x1_t __p0)
 
__ai poly16x4_t vreinterpret_p16_u16 (uint16x4_t __p0)
 
__ai poly16x4_t vreinterpret_p16_s8 (int8x8_t __p0)
 
__ai poly16x4_t vreinterpret_p16_f32 (float32x2_t __p0)
 
__ai poly16x4_t vreinterpret_p16_f16 (float16x4_t __p0)
 
__ai poly16x4_t vreinterpret_p16_s32 (int32x2_t __p0)
 
__ai poly16x4_t vreinterpret_p16_s64 (int64x1_t __p0)
 
__ai poly16x4_t vreinterpret_p16_s16 (int16x4_t __p0)
 
__ai poly8x16_t vreinterpretq_p8_p16 (poly16x8_t __p0)
 
__ai poly8x16_t vreinterpretq_p8_u8 (uint8x16_t __p0)
 
__ai poly8x16_t vreinterpretq_p8_u32 (uint32x4_t __p0)
 
__ai poly8x16_t vreinterpretq_p8_u64 (uint64x2_t __p0)
 
__ai poly8x16_t vreinterpretq_p8_u16 (uint16x8_t __p0)
 
__ai poly8x16_t vreinterpretq_p8_s8 (int8x16_t __p0)
 
__ai poly8x16_t vreinterpretq_p8_f32 (float32x4_t __p0)
 
__ai poly8x16_t vreinterpretq_p8_f16 (float16x8_t __p0)
 
__ai poly8x16_t vreinterpretq_p8_s32 (int32x4_t __p0)
 
__ai poly8x16_t vreinterpretq_p8_s64 (int64x2_t __p0)
 
__ai poly8x16_t vreinterpretq_p8_s16 (int16x8_t __p0)
 
__ai poly16x8_t vreinterpretq_p16_p8 (poly8x16_t __p0)
 
__ai poly16x8_t vreinterpretq_p16_u8 (uint8x16_t __p0)
 
__ai poly16x8_t vreinterpretq_p16_u32 (uint32x4_t __p0)
 
__ai poly16x8_t vreinterpretq_p16_u64 (uint64x2_t __p0)
 
__ai poly16x8_t vreinterpretq_p16_u16 (uint16x8_t __p0)
 
__ai poly16x8_t vreinterpretq_p16_s8 (int8x16_t __p0)
 
__ai poly16x8_t vreinterpretq_p16_f32 (float32x4_t __p0)
 
__ai poly16x8_t vreinterpretq_p16_f16 (float16x8_t __p0)
 
__ai poly16x8_t vreinterpretq_p16_s32 (int32x4_t __p0)
 
__ai poly16x8_t vreinterpretq_p16_s64 (int64x2_t __p0)
 
__ai poly16x8_t vreinterpretq_p16_s16 (int16x8_t __p0)
 
__ai uint8x16_t vreinterpretq_u8_p8 (poly8x16_t __p0)
 
__ai uint8x16_t vreinterpretq_u8_p16 (poly16x8_t __p0)
 
__ai uint8x16_t vreinterpretq_u8_u32 (uint32x4_t __p0)
 
__ai uint8x16_t vreinterpretq_u8_u64 (uint64x2_t __p0)
 
__ai uint8x16_t vreinterpretq_u8_u16 (uint16x8_t __p0)
 
__ai uint8x16_t vreinterpretq_u8_s8 (int8x16_t __p0)
 
__ai uint8x16_t vreinterpretq_u8_f32 (float32x4_t __p0)
 
__ai uint8x16_t vreinterpretq_u8_f16 (float16x8_t __p0)
 
__ai uint8x16_t vreinterpretq_u8_s32 (int32x4_t __p0)
 
__ai uint8x16_t vreinterpretq_u8_s64 (int64x2_t __p0)
 
__ai uint8x16_t vreinterpretq_u8_s16 (int16x8_t __p0)
 
__ai uint32x4_t vreinterpretq_u32_p8 (poly8x16_t __p0)
 
__ai uint32x4_t vreinterpretq_u32_p16 (poly16x8_t __p0)
 
__ai uint32x4_t vreinterpretq_u32_u8 (uint8x16_t __p0)
 
__ai uint32x4_t vreinterpretq_u32_u64 (uint64x2_t __p0)
 
__ai uint32x4_t vreinterpretq_u32_u16 (uint16x8_t __p0)
 
__ai uint32x4_t vreinterpretq_u32_s8 (int8x16_t __p0)
 
__ai uint32x4_t vreinterpretq_u32_f32 (float32x4_t __p0)
 
__ai uint32x4_t vreinterpretq_u32_f16 (float16x8_t __p0)
 
__ai uint32x4_t vreinterpretq_u32_s32 (int32x4_t __p0)
 
__ai uint32x4_t vreinterpretq_u32_s64 (int64x2_t __p0)
 
__ai uint32x4_t vreinterpretq_u32_s16 (int16x8_t __p0)
 
__ai uint64x2_t vreinterpretq_u64_p8 (poly8x16_t __p0)
 
__ai uint64x2_t vreinterpretq_u64_p16 (poly16x8_t __p0)
 
__ai uint64x2_t vreinterpretq_u64_u8 (uint8x16_t __p0)
 
__ai uint64x2_t vreinterpretq_u64_u32 (uint32x4_t __p0)
 
__ai uint64x2_t vreinterpretq_u64_u16 (uint16x8_t __p0)
 
__ai uint64x2_t vreinterpretq_u64_s8 (int8x16_t __p0)
 
__ai uint64x2_t vreinterpretq_u64_f32 (float32x4_t __p0)
 
__ai uint64x2_t vreinterpretq_u64_f16 (float16x8_t __p0)
 
__ai uint64x2_t vreinterpretq_u64_s32 (int32x4_t __p0)
 
__ai uint64x2_t vreinterpretq_u64_s64 (int64x2_t __p0)
 
__ai uint64x2_t vreinterpretq_u64_s16 (int16x8_t __p0)
 
__ai uint16x8_t vreinterpretq_u16_p8 (poly8x16_t __p0)
 
__ai uint16x8_t vreinterpretq_u16_p16 (poly16x8_t __p0)
 
__ai uint16x8_t vreinterpretq_u16_u8 (uint8x16_t __p0)
 
__ai uint16x8_t vreinterpretq_u16_u32 (uint32x4_t __p0)
 
__ai uint16x8_t vreinterpretq_u16_u64 (uint64x2_t __p0)
 
__ai uint16x8_t vreinterpretq_u16_s8 (int8x16_t __p0)
 
__ai uint16x8_t vreinterpretq_u16_f32 (float32x4_t __p0)
 
__ai uint16x8_t vreinterpretq_u16_f16 (float16x8_t __p0)
 
__ai uint16x8_t vreinterpretq_u16_s32 (int32x4_t __p0)
 
__ai uint16x8_t vreinterpretq_u16_s64 (int64x2_t __p0)
 
__ai uint16x8_t vreinterpretq_u16_s16 (int16x8_t __p0)
 
__ai int8x16_t vreinterpretq_s8_p8 (poly8x16_t __p0)
 
__ai int8x16_t vreinterpretq_s8_p16 (poly16x8_t __p0)
 
__ai int8x16_t vreinterpretq_s8_u8 (uint8x16_t __p0)
 
__ai int8x16_t vreinterpretq_s8_u32 (uint32x4_t __p0)
 
__ai int8x16_t vreinterpretq_s8_u64 (uint64x2_t __p0)
 
__ai int8x16_t vreinterpretq_s8_u16 (uint16x8_t __p0)
 
__ai int8x16_t vreinterpretq_s8_f32 (float32x4_t __p0)
 
__ai int8x16_t vreinterpretq_s8_f16 (float16x8_t __p0)
 
__ai int8x16_t vreinterpretq_s8_s32 (int32x4_t __p0)
 
__ai int8x16_t vreinterpretq_s8_s64 (int64x2_t __p0)
 
__ai int8x16_t vreinterpretq_s8_s16 (int16x8_t __p0)
 
__ai float32x4_t vreinterpretq_f32_p8 (poly8x16_t __p0)
 
__ai float32x4_t vreinterpretq_f32_p16 (poly16x8_t __p0)
 
__ai float32x4_t vreinterpretq_f32_u8 (uint8x16_t __p0)
 
__ai float32x4_t vreinterpretq_f32_u32 (uint32x4_t __p0)
 
__ai float32x4_t vreinterpretq_f32_u64 (uint64x2_t __p0)
 
__ai float32x4_t vreinterpretq_f32_u16 (uint16x8_t __p0)
 
__ai float32x4_t vreinterpretq_f32_s8 (int8x16_t __p0)
 
__ai float32x4_t vreinterpretq_f32_f16 (float16x8_t __p0)
 
__ai float32x4_t vreinterpretq_f32_s32 (int32x4_t __p0)
 
__ai float32x4_t vreinterpretq_f32_s64 (int64x2_t __p0)
 
__ai float32x4_t vreinterpretq_f32_s16 (int16x8_t __p0)
 
__ai float16x8_t vreinterpretq_f16_p8 (poly8x16_t __p0)
 
__ai float16x8_t vreinterpretq_f16_p16 (poly16x8_t __p0)
 
__ai float16x8_t vreinterpretq_f16_u8 (uint8x16_t __p0)
 
__ai float16x8_t vreinterpretq_f16_u32 (uint32x4_t __p0)
 
__ai float16x8_t vreinterpretq_f16_u64 (uint64x2_t __p0)
 
__ai float16x8_t vreinterpretq_f16_u16 (uint16x8_t __p0)
 
__ai float16x8_t vreinterpretq_f16_s8 (int8x16_t __p0)
 
__ai float16x8_t vreinterpretq_f16_f32 (float32x4_t __p0)
 
__ai float16x8_t vreinterpretq_f16_s32 (int32x4_t __p0)
 
__ai float16x8_t vreinterpretq_f16_s64 (int64x2_t __p0)
 
__ai float16x8_t vreinterpretq_f16_s16 (int16x8_t __p0)
 
__ai int32x4_t vreinterpretq_s32_p8 (poly8x16_t __p0)
 
__ai int32x4_t vreinterpretq_s32_p16 (poly16x8_t __p0)
 
__ai int32x4_t vreinterpretq_s32_u8 (uint8x16_t __p0)
 
__ai int32x4_t vreinterpretq_s32_u32 (uint32x4_t __p0)
 
__ai int32x4_t vreinterpretq_s32_u64 (uint64x2_t __p0)
 
__ai int32x4_t vreinterpretq_s32_u16 (uint16x8_t __p0)
 
__ai int32x4_t vreinterpretq_s32_s8 (int8x16_t __p0)
 
__ai int32x4_t vreinterpretq_s32_f32 (float32x4_t __p0)
 
__ai int32x4_t vreinterpretq_s32_f16 (float16x8_t __p0)
 
__ai int32x4_t vreinterpretq_s32_s64 (int64x2_t __p0)
 
__ai int32x4_t vreinterpretq_s32_s16 (int16x8_t __p0)
 
__ai int64x2_t vreinterpretq_s64_p8 (poly8x16_t __p0)
 
__ai int64x2_t vreinterpretq_s64_p16 (poly16x8_t __p0)
 
__ai int64x2_t vreinterpretq_s64_u8 (uint8x16_t __p0)
 
__ai int64x2_t vreinterpretq_s64_u32 (uint32x4_t __p0)
 
__ai int64x2_t vreinterpretq_s64_u64 (uint64x2_t __p0)
 
__ai int64x2_t vreinterpretq_s64_u16 (uint16x8_t __p0)
 
__ai int64x2_t vreinterpretq_s64_s8 (int8x16_t __p0)
 
__ai int64x2_t vreinterpretq_s64_f32 (float32x4_t __p0)
 
__ai int64x2_t vreinterpretq_s64_f16 (float16x8_t __p0)
 
__ai int64x2_t vreinterpretq_s64_s32 (int32x4_t __p0)
 
__ai int64x2_t vreinterpretq_s64_s16 (int16x8_t __p0)
 
__ai int16x8_t vreinterpretq_s16_p8 (poly8x16_t __p0)
 
__ai int16x8_t vreinterpretq_s16_p16 (poly16x8_t __p0)
 
__ai int16x8_t vreinterpretq_s16_u8 (uint8x16_t __p0)
 
__ai int16x8_t vreinterpretq_s16_u32 (uint32x4_t __p0)
 
__ai int16x8_t vreinterpretq_s16_u64 (uint64x2_t __p0)
 
__ai int16x8_t vreinterpretq_s16_u16 (uint16x8_t __p0)
 
__ai int16x8_t vreinterpretq_s16_s8 (int8x16_t __p0)
 
__ai int16x8_t vreinterpretq_s16_f32 (float32x4_t __p0)
 
__ai int16x8_t vreinterpretq_s16_f16 (float16x8_t __p0)
 
__ai int16x8_t vreinterpretq_s16_s32 (int32x4_t __p0)
 
__ai int16x8_t vreinterpretq_s16_s64 (int64x2_t __p0)
 
__ai uint8x8_t vreinterpret_u8_p8 (poly8x8_t __p0)
 
__ai uint8x8_t vreinterpret_u8_p16 (poly16x4_t __p0)
 
__ai uint8x8_t vreinterpret_u8_u32 (uint32x2_t __p0)
 
__ai uint8x8_t vreinterpret_u8_u64 (uint64x1_t __p0)
 
__ai uint8x8_t vreinterpret_u8_u16 (uint16x4_t __p0)
 
__ai uint8x8_t vreinterpret_u8_s8 (int8x8_t __p0)
 
__ai uint8x8_t vreinterpret_u8_f32 (float32x2_t __p0)
 
__ai uint8x8_t vreinterpret_u8_f16 (float16x4_t __p0)
 
__ai uint8x8_t vreinterpret_u8_s32 (int32x2_t __p0)
 
__ai uint8x8_t vreinterpret_u8_s64 (int64x1_t __p0)
 
__ai uint8x8_t vreinterpret_u8_s16 (int16x4_t __p0)
 
__ai uint32x2_t vreinterpret_u32_p8 (poly8x8_t __p0)
 
__ai uint32x2_t vreinterpret_u32_p16 (poly16x4_t __p0)
 
__ai uint32x2_t vreinterpret_u32_u8 (uint8x8_t __p0)
 
__ai uint32x2_t vreinterpret_u32_u64 (uint64x1_t __p0)
 
__ai uint32x2_t vreinterpret_u32_u16 (uint16x4_t __p0)
 
__ai uint32x2_t vreinterpret_u32_s8 (int8x8_t __p0)
 
__ai uint32x2_t vreinterpret_u32_f32 (float32x2_t __p0)
 
__ai uint32x2_t vreinterpret_u32_f16 (float16x4_t __p0)
 
__ai uint32x2_t vreinterpret_u32_s32 (int32x2_t __p0)
 
__ai uint32x2_t vreinterpret_u32_s64 (int64x1_t __p0)
 
__ai uint32x2_t vreinterpret_u32_s16 (int16x4_t __p0)
 
__ai uint64x1_t vreinterpret_u64_p8 (poly8x8_t __p0)
 
__ai uint64x1_t vreinterpret_u64_p16 (poly16x4_t __p0)
 
__ai uint64x1_t vreinterpret_u64_u8 (uint8x8_t __p0)
 
__ai uint64x1_t vreinterpret_u64_u32 (uint32x2_t __p0)
 
__ai uint64x1_t vreinterpret_u64_u16 (uint16x4_t __p0)
 
__ai uint64x1_t vreinterpret_u64_s8 (int8x8_t __p0)
 
__ai uint64x1_t vreinterpret_u64_f32 (float32x2_t __p0)
 
__ai uint64x1_t vreinterpret_u64_f16 (float16x4_t __p0)
 
__ai uint64x1_t vreinterpret_u64_s32 (int32x2_t __p0)
 
__ai uint64x1_t vreinterpret_u64_s64 (int64x1_t __p0)
 
__ai uint64x1_t vreinterpret_u64_s16 (int16x4_t __p0)
 
__ai uint16x4_t vreinterpret_u16_p8 (poly8x8_t __p0)
 
__ai uint16x4_t vreinterpret_u16_p16 (poly16x4_t __p0)
 
__ai uint16x4_t vreinterpret_u16_u8 (uint8x8_t __p0)
 
__ai uint16x4_t vreinterpret_u16_u32 (uint32x2_t __p0)
 
__ai uint16x4_t vreinterpret_u16_u64 (uint64x1_t __p0)
 
__ai uint16x4_t vreinterpret_u16_s8 (int8x8_t __p0)
 
__ai uint16x4_t vreinterpret_u16_f32 (float32x2_t __p0)
 
__ai uint16x4_t vreinterpret_u16_f16 (float16x4_t __p0)
 
__ai uint16x4_t vreinterpret_u16_s32 (int32x2_t __p0)
 
__ai uint16x4_t vreinterpret_u16_s64 (int64x1_t __p0)
 
__ai uint16x4_t vreinterpret_u16_s16 (int16x4_t __p0)
 
__ai int8x8_t vreinterpret_s8_p8 (poly8x8_t __p0)
 
__ai int8x8_t vreinterpret_s8_p16 (poly16x4_t __p0)
 
__ai int8x8_t vreinterpret_s8_u8 (uint8x8_t __p0)
 
__ai int8x8_t vreinterpret_s8_u32 (uint32x2_t __p0)
 
__ai int8x8_t vreinterpret_s8_u64 (uint64x1_t __p0)
 
__ai int8x8_t vreinterpret_s8_u16 (uint16x4_t __p0)
 
__ai int8x8_t vreinterpret_s8_f32 (float32x2_t __p0)
 
__ai int8x8_t vreinterpret_s8_f16 (float16x4_t __p0)
 
__ai int8x8_t vreinterpret_s8_s32 (int32x2_t __p0)
 
__ai int8x8_t vreinterpret_s8_s64 (int64x1_t __p0)
 
__ai int8x8_t vreinterpret_s8_s16 (int16x4_t __p0)
 
__ai float32x2_t vreinterpret_f32_p8 (poly8x8_t __p0)
 
__ai float32x2_t vreinterpret_f32_p16 (poly16x4_t __p0)
 
__ai float32x2_t vreinterpret_f32_u8 (uint8x8_t __p0)
 
__ai float32x2_t vreinterpret_f32_u32 (uint32x2_t __p0)
 
__ai float32x2_t vreinterpret_f32_u64 (uint64x1_t __p0)
 
__ai float32x2_t vreinterpret_f32_u16 (uint16x4_t __p0)
 
__ai float32x2_t vreinterpret_f32_s8 (int8x8_t __p0)
 
__ai float32x2_t vreinterpret_f32_f16 (float16x4_t __p0)
 
__ai float32x2_t vreinterpret_f32_s32 (int32x2_t __p0)
 
__ai float32x2_t vreinterpret_f32_s64 (int64x1_t __p0)
 
__ai float32x2_t vreinterpret_f32_s16 (int16x4_t __p0)
 
__ai float16x4_t vreinterpret_f16_p8 (poly8x8_t __p0)
 
__ai float16x4_t vreinterpret_f16_p16 (poly16x4_t __p0)
 
__ai float16x4_t vreinterpret_f16_u8 (uint8x8_t __p0)
 
__ai float16x4_t vreinterpret_f16_u32 (uint32x2_t __p0)
 
__ai float16x4_t vreinterpret_f16_u64 (uint64x1_t __p0)
 
__ai float16x4_t vreinterpret_f16_u16 (uint16x4_t __p0)
 
__ai float16x4_t vreinterpret_f16_s8 (int8x8_t __p0)
 
__ai float16x4_t vreinterpret_f16_f32 (float32x2_t __p0)
 
__ai float16x4_t vreinterpret_f16_s32 (int32x2_t __p0)
 
__ai float16x4_t vreinterpret_f16_s64 (int64x1_t __p0)
 
__ai float16x4_t vreinterpret_f16_s16 (int16x4_t __p0)
 
__ai int32x2_t vreinterpret_s32_p8 (poly8x8_t __p0)
 
__ai int32x2_t vreinterpret_s32_p16 (poly16x4_t __p0)
 
__ai int32x2_t vreinterpret_s32_u8 (uint8x8_t __p0)
 
__ai int32x2_t vreinterpret_s32_u32 (uint32x2_t __p0)
 
__ai int32x2_t vreinterpret_s32_u64 (uint64x1_t __p0)
 
__ai int32x2_t vreinterpret_s32_u16 (uint16x4_t __p0)
 
__ai int32x2_t vreinterpret_s32_s8 (int8x8_t __p0)
 
__ai int32x2_t vreinterpret_s32_f32 (float32x2_t __p0)
 
__ai int32x2_t vreinterpret_s32_f16 (float16x4_t __p0)
 
__ai int32x2_t vreinterpret_s32_s64 (int64x1_t __p0)
 
__ai int32x2_t vreinterpret_s32_s16 (int16x4_t __p0)
 
__ai int64x1_t vreinterpret_s64_p8 (poly8x8_t __p0)
 
__ai int64x1_t vreinterpret_s64_p16 (poly16x4_t __p0)
 
__ai int64x1_t vreinterpret_s64_u8 (uint8x8_t __p0)
 
__ai int64x1_t vreinterpret_s64_u32 (uint32x2_t __p0)
 
__ai int64x1_t vreinterpret_s64_u64 (uint64x1_t __p0)
 
__ai int64x1_t vreinterpret_s64_u16 (uint16x4_t __p0)
 
__ai int64x1_t vreinterpret_s64_s8 (int8x8_t __p0)
 
__ai int64x1_t vreinterpret_s64_f32 (float32x2_t __p0)
 
__ai int64x1_t vreinterpret_s64_f16 (float16x4_t __p0)
 
__ai int64x1_t vreinterpret_s64_s32 (int32x2_t __p0)
 
__ai int64x1_t vreinterpret_s64_s16 (int16x4_t __p0)
 
__ai int16x4_t vreinterpret_s16_p8 (poly8x8_t __p0)
 
__ai int16x4_t vreinterpret_s16_p16 (poly16x4_t __p0)
 
__ai int16x4_t vreinterpret_s16_u8 (uint8x8_t __p0)
 
__ai int16x4_t vreinterpret_s16_u32 (uint32x2_t __p0)
 
__ai int16x4_t vreinterpret_s16_u64 (uint64x1_t __p0)
 
__ai int16x4_t vreinterpret_s16_u16 (uint16x4_t __p0)
 
__ai int16x4_t vreinterpret_s16_s8 (int8x8_t __p0)
 
__ai int16x4_t vreinterpret_s16_f32 (float32x2_t __p0)
 
__ai int16x4_t vreinterpret_s16_f16 (float16x4_t __p0)
 
__ai int16x4_t vreinterpret_s16_s32 (int32x2_t __p0)
 
__ai int16x4_t vreinterpret_s16_s64 (int64x1_t __p0)
 
__ai uint8x16_t vabaq_u8 (uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2)
 
__ai uint32x4_t vabaq_u32 (uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2)
 
__ai uint16x8_t vabaq_u16 (uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2)
 
__ai int8x16_t vabaq_s8 (int8x16_t __p0, int8x16_t __p1, int8x16_t __p2)
 
__ai int32x4_t vabaq_s32 (int32x4_t __p0, int32x4_t __p1, int32x4_t __p2)
 
__ai int16x8_t vabaq_s16 (int16x8_t __p0, int16x8_t __p1, int16x8_t __p2)
 
__ai uint8x8_t vaba_u8 (uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2)
 
__ai uint32x2_t vaba_u32 (uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2)
 
__ai uint16x4_t vaba_u16 (uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2)
 
__ai int8x8_t vaba_s8 (int8x8_t __p0, int8x8_t __p1, int8x8_t __p2)
 
__ai int32x2_t vaba_s32 (int32x2_t __p0, int32x2_t __p1, int32x2_t __p2)
 
__ai int16x4_t vaba_s16 (int16x4_t __p0, int16x4_t __p1, int16x4_t __p2)
 
__ai uint16x8_t vabdl_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint16x8_t __noswap_vabdl_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint64x2_t vabdl_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint64x2_t __noswap_vabdl_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint32x4_t vabdl_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai uint32x4_t __noswap_vabdl_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int16x8_t vabdl_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int16x8_t __noswap_vabdl_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int64x2_t vabdl_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int64x2_t __noswap_vabdl_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int32x4_t vabdl_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai int32x4_t __noswap_vabdl_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint16x8_t vaddl_u8 (uint8x8_t __p0, uint8x8_t __p1)
 
__ai uint64x2_t vaddl_u32 (uint32x2_t __p0, uint32x2_t __p1)
 
__ai uint32x4_t vaddl_u16 (uint16x4_t __p0, uint16x4_t __p1)
 
__ai int16x8_t vaddl_s8 (int8x8_t __p0, int8x8_t __p1)
 
__ai int64x2_t vaddl_s32 (int32x2_t __p0, int32x2_t __p1)
 
__ai int32x4_t vaddl_s16 (int16x4_t __p0, int16x4_t __p1)
 
__ai uint16x8_t vaddw_u8 (uint16x8_t __p0, uint8x8_t __p1)
 
__ai uint64x2_t vaddw_u32 (uint64x2_t __p0, uint32x2_t __p1)
 
__ai uint32x4_t vaddw_u16 (uint32x4_t __p0, uint16x4_t __p1)
 
__ai int16x8_t vaddw_s8 (int16x8_t __p0, int8x8_t __p1)
 
__ai int64x2_t vaddw_s32 (int64x2_t __p0, int32x2_t __p1)
 
__ai int32x4_t vaddw_s16 (int32x4_t __p0, int16x4_t __p1)
 
__ai uint16x8_t vmlal_u8 (uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2)
 
__ai uint16x8_t __noswap_vmlal_u8 (uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2)
 
__ai uint64x2_t vmlal_u32 (uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2)
 
__ai uint64x2_t __noswap_vmlal_u32 (uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2)
 
__ai uint32x4_t vmlal_u16 (uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2)
 
__ai uint32x4_t __noswap_vmlal_u16 (uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2)
 
__ai int16x8_t vmlal_s8 (int16x8_t __p0, int8x8_t __p1, int8x8_t __p2)
 
__ai int16x8_t __noswap_vmlal_s8 (int16x8_t __p0, int8x8_t __p1, int8x8_t __p2)
 
__ai int64x2_t vmlal_s32 (int64x2_t __p0, int32x2_t __p1, int32x2_t __p2)
 
__ai int64x2_t __noswap_vmlal_s32 (int64x2_t __p0, int32x2_t __p1, int32x2_t __p2)
 
__ai int32x4_t vmlal_s16 (int32x4_t __p0, int16x4_t __p1, int16x4_t __p2)
 
__ai int32x4_t __noswap_vmlal_s16 (int32x4_t __p0, int16x4_t __p1, int16x4_t __p2)
 
__ai uint64x2_t vmlal_n_u32 (uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2)
 
__ai uint64x2_t __noswap_vmlal_n_u32 (uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2)
 
__ai uint32x4_t vmlal_n_u16 (uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2)
 
__ai uint32x4_t __noswap_vmlal_n_u16 (uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2)
 
__ai int64x2_t vmlal_n_s32 (int64x2_t __p0, int32x2_t __p1, int32_t __p2)
 
__ai int64x2_t __noswap_vmlal_n_s32 (int64x2_t __p0, int32x2_t __p1, int32_t __p2)
 
__ai int32x4_t vmlal_n_s16 (int32x4_t __p0, int16x4_t __p1, int16_t __p2)
 
__ai int32x4_t __noswap_vmlal_n_s16 (int32x4_t __p0, int16x4_t __p1, int16_t __p2)
 
__ai uint16x8_t vmlsl_u8 (uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2)
 
__ai uint16x8_t __noswap_vmlsl_u8 (uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2)
 
__ai uint64x2_t vmlsl_u32 (uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2)
 
__ai uint64x2_t __noswap_vmlsl_u32 (uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2)
 
__ai uint32x4_t vmlsl_u16 (uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2)
 
__ai uint32x4_t __noswap_vmlsl_u16 (uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2)
 
__ai int16x8_t vmlsl_s8 (int16x8_t __p0, int8x8_t __p1, int8x8_t __p2)
 
__ai int16x8_t __noswap_vmlsl_s8 (int16x8_t __p0, int8x8_t __p1, int8x8_t __p2)
 
__ai int64x2_t vmlsl_s32 (int64x2_t __p0, int32x2_t __p1, int32x2_t __p2)
 
__ai int64x2_t __noswap_vmlsl_s32 (int64x2_t __p0, int32x2_t __p1, int32x2_t __p2)
 
__ai int32x4_t vmlsl_s16 (int32x4_t __p0, int16x4_t __p1, int16x4_t __p2)
 
__ai int32x4_t __noswap_vmlsl_s16 (int32x4_t __p0, int16x4_t __p1, int16x4_t __p2)
 
__ai uint64x2_t vmlsl_n_u32 (uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2)
 
__ai uint64x2_t __noswap_vmlsl_n_u32 (uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2)
 
__ai uint32x4_t vmlsl_n_u16 (uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2)
 
__ai uint32x4_t __noswap_vmlsl_n_u16 (uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2)
 
__ai int64x2_t vmlsl_n_s32 (int64x2_t __p0, int32x2_t __p1, int32_t __p2)
 
__ai int64x2_t __noswap_vmlsl_n_s32 (int64x2_t __p0, int32x2_t __p1, int32_t __p2)
 
__ai int32x4_t vmlsl_n_s16 (int32x4_t __p0, int16x4_t __p1, int16_t __p2)
 
__ai int32x4_t __noswap_vmlsl_n_s16 (int32x4_t __p0, int16x4_t __p1, int16_t __p2)
 
__ai uint16x8_t vabal_u8 (uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2)
 
__ai uint16x8_t __noswap_vabal_u8 (uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2)
 
__ai uint64x2_t vabal_u32 (uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2)
 
__ai uint64x2_t __noswap_vabal_u32 (uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2)
 
__ai uint32x4_t vabal_u16 (uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2)
 
__ai uint32x4_t __noswap_vabal_u16 (uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2)
 
__ai int16x8_t vabal_s8 (int16x8_t __p0, int8x8_t __p1, int8x8_t __p2)
 
__ai int16x8_t __noswap_vabal_s8 (int16x8_t __p0, int8x8_t __p1, int8x8_t __p2)
 
__ai int64x2_t vabal_s32 (int64x2_t __p0, int32x2_t __p1, int32x2_t __p2)
 
__ai int64x2_t __noswap_vabal_s32 (int64x2_t __p0, int32x2_t __p1, int32x2_t __p2)
 
__ai int32x4_t vabal_s16 (int32x4_t __p0, int16x4_t __p1, int16x4_t __p2)
 
__ai int32x4_t __noswap_vabal_s16 (int32x4_t __p0, int16x4_t __p1, int16x4_t __p2)
 

Macro Definition Documentation

#define __ai   static inline __attribute__((__always_inline__, __nodebug__))
#define __noswap_vget_lane_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x2_t __s0 = __p0; \
float32_t __ret; \
__ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
__ret; \
})
float float32_t
Definition: arm_neon.h:33
#define __noswap_vget_lane_p16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly16x4_t __s0 = __p0; \
poly16_t __ret; \
__ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
__ret; \
})
int16_t poly16_t
Definition: arm_neon.h:46
#define __noswap_vget_lane_p8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly8x8_t __s0 = __p0; \
poly8_t __ret; \
__ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
__ret; \
})
int8_t poly8_t
Definition: arm_neon.h:45
#define __noswap_vget_lane_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16_t __ret; \
__ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
__ret; \
})
#define __noswap_vget_lane_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32_t __ret; \
__ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
__ret; \
})
#define __noswap_vget_lane_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x1_t __s0 = __p0; \
int64_t __ret; \
__ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
__ret; \
})
#define __noswap_vget_lane_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8_t __ret; \
__ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
__ret; \
})
#define __noswap_vget_lane_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16_t __ret; \
__ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
__ret; \
})
#define __noswap_vget_lane_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32_t __ret; \
__ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
__ret; \
})
#define __noswap_vget_lane_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x1_t __s0 = __p0; \
uint64_t __ret; \
__ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
__ret; \
})
#define __noswap_vget_lane_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint8_t __ret; \
__ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
__ret; \
})
#define __noswap_vgetq_lane_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x4_t __s0 = __p0; \
float32_t __ret; \
__ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
__ret; \
})
float float32_t
Definition: arm_neon.h:33
#define __noswap_vgetq_lane_p16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly16x8_t __s0 = __p0; \
poly16_t __ret; \
__ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
__ret; \
})
int16_t poly16_t
Definition: arm_neon.h:46
#define __noswap_vgetq_lane_p8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly8x16_t __s0 = __p0; \
poly8_t __ret; \
__ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
__ret; \
})
int8_t poly8_t
Definition: arm_neon.h:45
#define __noswap_vgetq_lane_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16_t __ret; \
__ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
__ret; \
})
#define __noswap_vgetq_lane_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32_t __ret; \
__ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
__ret; \
})
#define __noswap_vgetq_lane_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64_t __ret; \
__ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
__ret; \
})
#define __noswap_vgetq_lane_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x16_t __s0 = __p0; \
int8_t __ret; \
__ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
__ret; \
})
#define __noswap_vgetq_lane_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16_t __ret; \
__ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
__ret; \
})
#define __noswap_vgetq_lane_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32_t __ret; \
__ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
__ret; \
})
#define __noswap_vgetq_lane_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64_t __ret; \
__ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
__ret; \
})
#define __noswap_vgetq_lane_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x16_t __s0 = __p0; \
uint8_t __ret; \
__ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
__ret; \
})
#define __noswap_vqrshrn_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
__ret; \
})
#define __noswap_vqrshrn_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
__ret; \
})
#define __noswap_vqrshrn_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
__ret; \
})
#define __noswap_vqrshrn_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
__ret; \
})
#define __noswap_vqrshrn_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
__ret; \
})
#define __noswap_vqrshrn_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
__ret; \
})
#define __noswap_vqrshrun_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
__ret; \
})
#define __noswap_vqrshrun_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
__ret; \
})
#define __noswap_vqrshrun_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
__ret; \
})
#define __noswap_vqshrn_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
__ret; \
})
#define __noswap_vqshrn_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
__ret; \
})
#define __noswap_vqshrn_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
__ret; \
})
#define __noswap_vqshrn_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
__ret; \
})
#define __noswap_vqshrn_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
__ret; \
})
#define __noswap_vqshrn_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
__ret; \
})
#define __noswap_vqshrun_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
__ret; \
})
#define __noswap_vqshrun_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
__ret; \
})
#define __noswap_vqshrun_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
__ret; \
})
#define __noswap_vrshrn_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
__ret; \
})
#define __noswap_vrshrn_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
__ret; \
})
#define __noswap_vrshrn_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
__ret; \
})
#define __noswap_vrshrn_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
__ret; \
})
#define __noswap_vrshrn_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
__ret; \
})
#define __noswap_vrshrn_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
__ret; \
})
#define __noswap_vset_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32_t __s0 = __p0; \
float32x2_t __s1 = __p1; \
float32x2_t __ret; \
__ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
__ret; \
})
float float32_t
Definition: arm_neon.h:33
#define __noswap_vset_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16_t __s0 = __p0; \
poly16x4_t __s1 = __p1; \
poly16x4_t __ret; \
__ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
__ret; \
})
int16_t poly16_t
Definition: arm_neon.h:46
#define __noswap_vset_lane_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8_t __s0 = __p0; \
poly8x8_t __s1 = __p1; \
poly8x8_t __ret; \
__ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
__ret; \
})
int8_t poly8_t
Definition: arm_neon.h:45
#define __noswap_vset_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
__ret; \
})
#define __noswap_vset_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
__ret; \
})
#define __noswap_vset_lane_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64_t __s0 = __p0; \
int64x1_t __s1 = __p1; \
int64x1_t __ret; \
__ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
__ret; \
})
#define __noswap_vset_lane_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8_t __s0 = __p0; \
int8x8_t __s1 = __p1; \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
__ret; \
})
#define __noswap_vset_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16_t __s0 = __p0; \
uint16x4_t __s1 = __p1; \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
__ret; \
})
#define __noswap_vset_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32_t __s0 = __p0; \
uint32x2_t __s1 = __p1; \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
__ret; \
})
#define __noswap_vset_lane_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64_t __s0 = __p0; \
uint64x1_t __s1 = __p1; \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
__ret; \
})
#define __noswap_vset_lane_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8_t __s0 = __p0; \
uint8x8_t __s1 = __p1; \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
__ret; \
})
#define __noswap_vsetq_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32_t __s0 = __p0; \
float32x4_t __s1 = __p1; \
float32x4_t __ret; \
__ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
__ret; \
})
float float32_t
Definition: arm_neon.h:33
#define __noswap_vsetq_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16_t __s0 = __p0; \
poly16x8_t __s1 = __p1; \
poly16x8_t __ret; \
__ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
__ret; \
})
int16_t poly16_t
Definition: arm_neon.h:46
#define __noswap_vsetq_lane_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8_t __s0 = __p0; \
poly8x16_t __s1 = __p1; \
poly8x16_t __ret; \
__ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
__ret; \
})
int8_t poly8_t
Definition: arm_neon.h:45
#define __noswap_vsetq_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16_t __s0 = __p0; \
int16x8_t __s1 = __p1; \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
__ret; \
})
#define __noswap_vsetq_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32_t __s0 = __p0; \
int32x4_t __s1 = __p1; \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
__ret; \
})
#define __noswap_vsetq_lane_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64_t __s0 = __p0; \
int64x2_t __s1 = __p1; \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
__ret; \
})
#define __noswap_vsetq_lane_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8_t __s0 = __p0; \
int8x16_t __s1 = __p1; \
int8x16_t __ret; \
__ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
__ret; \
})
#define __noswap_vsetq_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16_t __s0 = __p0; \
uint16x8_t __s1 = __p1; \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
__ret; \
})
#define __noswap_vsetq_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32_t __s0 = __p0; \
uint32x4_t __s1 = __p1; \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
__ret; \
})
#define __noswap_vsetq_lane_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64_t __s0 = __p0; \
uint64x2_t __s1 = __p1; \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
__ret; \
})
#define __noswap_vsetq_lane_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8_t __s0 = __p0; \
uint8x16_t __s1 = __p1; \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
__ret; \
})
#define __noswap_vshll_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
__ret; \
})
#define __noswap_vshll_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
__ret; \
})
#define __noswap_vshll_n_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
__ret; \
})
#define __noswap_vshll_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
__ret; \
})
#define __noswap_vshll_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
__ret; \
})
#define __noswap_vshll_n_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
__ret; \
})
#define __noswap_vshrn_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
__ret; \
})
#define __noswap_vshrn_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
__ret; \
})
#define __noswap_vshrn_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
__ret; \
})
#define __noswap_vshrn_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
__ret; \
})
#define __noswap_vshrn_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
__ret; \
})
#define __noswap_vshrn_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
__ret; \
})
#define vcvt_n_f32_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
float32x2_t __ret; \
__ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vcvt_n_f32_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
float32x2_t __ret; \
__ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vcvt_n_s32_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x2_t __s0 = __p0; \
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vcvt_n_u32_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x2_t __s0 = __p0; \
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vcvtq_n_f32_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
float32x4_t __ret; \
__ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vcvtq_n_f32_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
float32x4_t __ret; \
__ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vcvtq_n_s32_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x4_t __s0 = __p0; \
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vcvtq_n_u32_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x4_t __s0 = __p0; \
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vdup_lane_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x2_t __s0 = __p0; \
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
float32x2_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vdup_lane_p16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly16x4_t __s0 = __p0; \
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
poly16x4_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vdup_lane_p8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly8x8_t __s0 = __p0; \
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vdup_lane_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vdup_lane_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vdup_lane_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x1_t __s0 = __p0; \
int64x1_t __ret; \
__ret = __builtin_shufflevector(__s0, __s0, __p1); \
__ret; \
})
#define vdup_lane_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vdup_lane_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vdup_lane_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vdup_lane_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x1_t __s0 = __p0; \
uint64x1_t __ret; \
__ret = __builtin_shufflevector(__s0, __s0, __p1); \
__ret; \
})
#define vdup_lane_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vdup_n_f16 (   __p0)
Value:
__extension__ ({ \
float16_t __s0 = __p0; \
float16x4_t __ret; \
__ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__fp16 float16_t
Definition: arm_neon.h:34
#define vdupq_lane_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x2_t __s0 = __p0; \
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
float32x4_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vdupq_lane_p16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly16x4_t __s0 = __p0; \
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
poly16x8_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vdupq_lane_p8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly8x8_t __s0 = __p0; \
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x16_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vdupq_lane_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vdupq_lane_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x4_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vdupq_lane_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x1_t __s0 = __p0; \
int64x2_t __ret; \
__ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vdupq_lane_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vdupq_lane_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vdupq_lane_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x4_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vdupq_lane_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x1_t __s0 = __p0; \
uint64x2_t __ret; \
__ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vdupq_lane_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __ret; \
__ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vdupq_n_f16 (   __p0)
Value:
__extension__ ({ \
float16_t __s0 = __p0; \
float16x8_t __ret; \
__ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
__fp16 float16_t
Definition: arm_neon.h:34
#define vext_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x2_t __s0 = __p0; \
float32x2_t __s1 = __p1; \
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
float32x2_t __ret; \
__ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vext_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x4_t __s0 = __p0; \
poly16x4_t __s1 = __p1; \
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
poly16x4_t __ret; \
__ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vext_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x8_t __s0 = __p0; \
poly8x8_t __s1 = __p1; \
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8_t __ret; \
__ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vext_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vext_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vext_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64x1_t __s0 = __p0; \
int64x1_t __s1 = __p1; \
int64x1_t __ret; \
__ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
__ret; \
})
#define vext_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8x8_t __s1 = __p1; \
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vext_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __s1 = __p1; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vext_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __s1 = __p1; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vext_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64x1_t __s0 = __p0; \
uint64x1_t __s1 = __p1; \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
__ret; \
})
#define vext_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint8x8_t __s1 = __p1; \
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vextq_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x4_t __s0 = __p0; \
float32x4_t __s1 = __p1; \
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
float32x4_t __ret; \
__ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vextq_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x8_t __s0 = __p0; \
poly16x8_t __s1 = __p1; \
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
poly16x8_t __ret; \
__ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vextq_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x16_t __s0 = __p0; \
poly8x16_t __s1 = __p1; \
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x16_t __ret; \
__ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vextq_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __s1 = __p1; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vextq_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __s1 = __p1; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vextq_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __s1 = __p1; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vextq_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x16_t __s0 = __p0; \
int8x16_t __s1 = __p1; \
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __ret; \
__ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vextq_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __s1 = __p1; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vextq_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __s1 = __p1; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vextq_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64x2_t __s1 = __p1; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vextq_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x16_t __s0 = __p0; \
uint8x16_t __s1 = __p1; \
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vget_lane_f16 (   __p0_243,
  __p1_243 
)
Value:
__extension__ ({ \
float16x4_t __s0_243 = __p0_243; \
float16x4_t __rev0_243; __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 3, 2, 1, 0); \
float16_t __ret_243; \
float16x4_t __reint_243 = __rev0_243; \
int16_t __reint1_243 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_243, __p1_243); \
__ret_243 = *(float16_t *) &__reint1_243; \
__ret_243; \
})
#define __noswap_vget_lane_s16(__p0, __p1)
Definition: arm_neon.h:6927
__fp16 float16_t
Definition: arm_neon.h:34
#define vget_lane_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x2_t __s0 = __p0; \
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
float32_t __ret; \
__ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__rev0, __p1); \
__ret; \
})
float float32_t
Definition: arm_neon.h:33
#define vget_lane_p16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly16x4_t __s0 = __p0; \
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
poly16_t __ret; \
__ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
__ret; \
})
int16_t poly16_t
Definition: arm_neon.h:46
#define vget_lane_p8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly8x8_t __s0 = __p0; \
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8_t __ret; \
__ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
__ret; \
})
int8_t poly8_t
Definition: arm_neon.h:45
#define vget_lane_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16_t __ret; \
__ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
__ret; \
})
#define vget_lane_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32_t __ret; \
__ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
__ret; \
})
#define vget_lane_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x1_t __s0 = __p0; \
int64_t __ret; \
__ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
__ret; \
})
#define vget_lane_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8_t __ret; \
__ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
__ret; \
})
#define vget_lane_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16_t __ret; \
__ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
__ret; \
})
#define vget_lane_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32_t __ret; \
__ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
__ret; \
})
#define vget_lane_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x1_t __s0 = __p0; \
uint64_t __ret; \
__ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
__ret; \
})
#define vget_lane_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8_t __ret; \
__ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
__ret; \
})
#define vgetq_lane_f16 (   __p0_245,
  __p1_245 
)
Value:
__extension__ ({ \
float16x8_t __s0_245 = __p0_245; \
float16x8_t __rev0_245; __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 7, 6, 5, 4, 3, 2, 1, 0); \
float16_t __ret_245; \
float16x8_t __reint_245 = __rev0_245; \
int16_t __reint1_245 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_245, __p1_245); \
__ret_245 = *(float16_t *) &__reint1_245; \
__ret_245; \
})
#define __noswap_vgetq_lane_s16(__p0, __p1)
Definition: arm_neon.h:6722
__fp16 float16_t
Definition: arm_neon.h:34
#define vgetq_lane_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x4_t __s0 = __p0; \
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
float32_t __ret; \
__ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__rev0, __p1); \
__ret; \
})
float float32_t
Definition: arm_neon.h:33
#define vgetq_lane_p16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly16x8_t __s0 = __p0; \
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
poly16_t __ret; \
__ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
__ret; \
})
int16_t poly16_t
Definition: arm_neon.h:46
#define vgetq_lane_p8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly8x16_t __s0 = __p0; \
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8_t __ret; \
__ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
__ret; \
})
int8_t poly8_t
Definition: arm_neon.h:45
#define vgetq_lane_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16_t __ret; \
__ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
__ret; \
})
#define vgetq_lane_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32_t __ret; \
__ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
__ret; \
})
#define vgetq_lane_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int64_t __ret; \
__ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
__ret; \
})
#define vgetq_lane_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x16_t __s0 = __p0; \
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8_t __ret; \
__ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
__ret; \
})
#define vgetq_lane_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16_t __ret; \
__ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
__ret; \
})
#define vgetq_lane_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32_t __ret; \
__ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
__ret; \
})
#define vgetq_lane_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint64_t __ret; \
__ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
__ret; \
})
#define vgetq_lane_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x16_t __s0 = __p0; \
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8_t __ret; \
__ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
__ret; \
})
#define vld1_dup_f16 (   __p0)
Value:
__extension__ ({ \
float16x4_t __ret; \
__ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1_dup_f32 (   __p0)
Value:
__extension__ ({ \
float32x2_t __ret; \
__ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1_dup_p16 (   __p0)
Value:
__extension__ ({ \
poly16x4_t __ret; \
__ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1_dup_p8 (   __p0)
Value:
__extension__ ({ \
poly8x8_t __ret; \
__ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1_dup_s16 (   __p0)
Value:
__extension__ ({ \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1_dup_s32 (   __p0)
Value:
__extension__ ({ \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1_dup_s64 (   __p0)
Value:
__extension__ ({ \
int64x1_t __ret; \
__ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
__ret; \
})
#define vld1_dup_s8 (   __p0)
Value:
__extension__ ({ \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1_dup_u16 (   __p0)
Value:
__extension__ ({ \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1_dup_u32 (   __p0)
Value:
__extension__ ({ \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1_dup_u64 (   __p0)
Value:
__extension__ ({ \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
__ret; \
})
#define vld1_dup_u8 (   __p0)
Value:
__extension__ ({ \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1_f16 (   __p0)
Value:
__extension__ ({ \
float16x4_t __ret; \
__ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1_f32 (   __p0)
Value:
__extension__ ({ \
float32x2_t __ret; \
__ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x4_t __s1 = __p1; \
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
float16x4_t __ret; \
__ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x2_t __s1 = __p1; \
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
float32x2_t __ret; \
__ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x4_t __s1 = __p1; \
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
poly16x4_t __ret; \
__ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1_lane_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x8_t __s1 = __p1; \
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8_t __ret; \
__ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4_t __s1 = __p1; \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2_t __s1 = __p1; \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1_lane_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64x1_t __s1 = __p1; \
int64x1_t __ret; \
__ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
__ret; \
})
#define vld1_lane_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x8_t __s1 = __p1; \
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4_t __s1 = __p1; \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2_t __s1 = __p1; \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1_lane_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64x1_t __s1 = __p1; \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
__ret; \
})
#define vld1_lane_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x8_t __s1 = __p1; \
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1_p16 (   __p0)
Value:
__extension__ ({ \
poly16x4_t __ret; \
__ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1_p8 (   __p0)
Value:
__extension__ ({ \
poly8x8_t __ret; \
__ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1_s16 (   __p0)
Value:
__extension__ ({ \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1_s32 (   __p0)
Value:
__extension__ ({ \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1_s64 (   __p0)
Value:
__extension__ ({ \
int64x1_t __ret; \
__ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
__ret; \
})
#define vld1_s8 (   __p0)
Value:
__extension__ ({ \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1_u16 (   __p0)
Value:
__extension__ ({ \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1_u32 (   __p0)
Value:
__extension__ ({ \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1_u64 (   __p0)
Value:
__extension__ ({ \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
__ret; \
})
#define vld1_u8 (   __p0)
Value:
__extension__ ({ \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_dup_f16 (   __p0)
Value:
__extension__ ({ \
float16x8_t __ret; \
__ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_dup_f32 (   __p0)
Value:
__extension__ ({ \
float32x4_t __ret; \
__ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_dup_p16 (   __p0)
Value:
__extension__ ({ \
poly16x8_t __ret; \
__ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_dup_p8 (   __p0)
Value:
__extension__ ({ \
poly8x16_t __ret; \
__ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_dup_s16 (   __p0)
Value:
__extension__ ({ \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_dup_s32 (   __p0)
Value:
__extension__ ({ \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_dup_s64 (   __p0)
Value:
__extension__ ({ \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1q_dup_s8 (   __p0)
Value:
__extension__ ({ \
int8x16_t __ret; \
__ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_dup_u16 (   __p0)
Value:
__extension__ ({ \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_dup_u32 (   __p0)
Value:
__extension__ ({ \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_dup_u64 (   __p0)
Value:
__extension__ ({ \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1q_dup_u8 (   __p0)
Value:
__extension__ ({ \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_f16 (   __p0)
Value:
__extension__ ({ \
float16x8_t __ret; \
__ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_f32 (   __p0)
Value:
__extension__ ({ \
float32x4_t __ret; \
__ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x8_t __s1 = __p1; \
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
float16x8_t __ret; \
__ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x4_t __s1 = __p1; \
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
float32x4_t __ret; \
__ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x8_t __s1 = __p1; \
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
poly16x8_t __ret; \
__ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_lane_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x16_t __s1 = __p1; \
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x16_t __ret; \
__ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8_t __s1 = __p1; \
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4_t __s1 = __p1; \
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_lane_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64x2_t __s1 = __p1; \
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1q_lane_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x16_t __s1 = __p1; \
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __ret; \
__ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x8_t __s1 = __p1; \
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x4_t __s1 = __p1; \
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_lane_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64x2_t __s1 = __p1; \
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1q_lane_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x16_t __s1 = __p1; \
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_p16 (   __p0)
Value:
__extension__ ({ \
poly16x8_t __ret; \
__ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_p8 (   __p0)
Value:
__extension__ ({ \
poly8x16_t __ret; \
__ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_s16 (   __p0)
Value:
__extension__ ({ \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_s32 (   __p0)
Value:
__extension__ ({ \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_s64 (   __p0)
Value:
__extension__ ({ \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1q_s8 (   __p0)
Value:
__extension__ ({ \
int8x16_t __ret; \
__ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_u16 (   __p0)
Value:
__extension__ ({ \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_u32 (   __p0)
Value:
__extension__ ({ \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vld1q_u64 (   __p0)
Value:
__extension__ ({ \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vld1q_u8 (   __p0)
Value:
__extension__ ({ \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vld2_dup_f16 (   __p0)
Value:
__extension__ ({ \
float16x4x2_t __ret; \
__builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
 \
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct float16x4x2_t float16x4x2_t
#define vld2_dup_f32 (   __p0)
Value:
__extension__ ({ \
float32x2x2_t __ret; \
__builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
 \
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret; \
})
struct float32x2x2_t float32x2x2_t
#define vld2_dup_p16 (   __p0)
Value:
__extension__ ({ \
poly16x4x2_t __ret; \
__builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct poly16x4x2_t poly16x4x2_t
#define vld2_dup_p8 (   __p0)
Value:
__extension__ ({ \
poly8x8x2_t __ret; \
__builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly8x8x2_t poly8x8x2_t
#define vld2_dup_s16 (   __p0)
Value:
__extension__ ({ \
int16x4x2_t __ret; \
__builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct int16x4x2_t int16x4x2_t
#define vld2_dup_s32 (   __p0)
Value:
__extension__ ({ \
int32x2x2_t __ret; \
__builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret; \
})
struct int32x2x2_t int32x2x2_t
#define vld2_dup_s64 (   __p0)
Value:
__extension__ ({ \
int64x1x2_t __ret; \
__builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
__ret; \
})
struct int64x1x2_t int64x1x2_t
#define vld2_dup_s8 (   __p0)
Value:
__extension__ ({ \
int8x8x2_t __ret; \
__builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int8x8x2_t int8x8x2_t
#define vld2_dup_u16 (   __p0)
Value:
__extension__ ({ \
uint16x4x2_t __ret; \
__builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct uint16x4x2_t uint16x4x2_t
#define vld2_dup_u32 (   __p0)
Value:
__extension__ ({ \
uint32x2x2_t __ret; \
__builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret; \
})
struct uint32x2x2_t uint32x2x2_t
#define vld2_dup_u64 (   __p0)
Value:
__extension__ ({ \
uint64x1x2_t __ret; \
__builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
__ret; \
})
struct uint64x1x2_t uint64x1x2_t
#define vld2_dup_u8 (   __p0)
Value:
__extension__ ({ \
uint8x8x2_t __ret; \
__builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint8x8x2_t uint8x8x2_t
#define vld2_f16 (   __p0)
Value:
__extension__ ({ \
float16x4x2_t __ret; \
__builtin_neon_vld2_v(&__ret, __p0, 8); \
 \
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct float16x4x2_t float16x4x2_t
#define vld2_f32 (   __p0)
Value:
__extension__ ({ \
float32x2x2_t __ret; \
__builtin_neon_vld2_v(&__ret, __p0, 9); \
 \
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret; \
})
struct float32x2x2_t float32x2x2_t
#define vld2_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x4x2_t __s1 = __p1; \
float16x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
float16x4x2_t __ret; \
__builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
 \
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
})
float16x4_t val[2]
Definition: arm_neon.h:146
struct float16x4x2_t float16x4x2_t
#define vld2_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x2x2_t __s1 = __p1; \
float32x2x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
float32x2x2_t __ret; \
__builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
 \
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret; \
})
})
struct float32x2x2_t float32x2x2_t
float32x2_t val[2]
Definition: arm_neon.h:154
#define vld2_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x4x2_t __s1 = __p1; \
poly16x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
poly16x4x2_t __ret; \
__builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct poly16x4x2_t poly16x4x2_t
poly16x4_t val[2]
Definition: arm_neon.h:180
#define vld2_lane_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x8x2_t __s1 = __p1; \
poly8x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8x2_t __ret; \
__builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
poly8x8_t val[2]
Definition: arm_neon.h:172
struct poly8x8x2_t poly8x8x2_t
#define vld2_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4x2_t __s1 = __p1; \
int16x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
int16x4x2_t __ret; \
__builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct int16x4x2_t int16x4x2_t
int16x4_t val[2]
Definition: arm_neon.h:90
#define vld2_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2x2_t __s1 = __p1; \
int32x2x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
int32x2x2_t __ret; \
__builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret; \
})
int32x2_t val[2]
Definition: arm_neon.h:98
struct int32x2x2_t int32x2x2_t
#define vld2_lane_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x8x2_t __s1 = __p1; \
int8x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8x2_t __ret; \
__builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
int8x8_t val[2]
Definition: arm_neon.h:82
struct int8x8x2_t int8x8x2_t
#define vld2_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4x2_t __s1 = __p1; \
uint16x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
uint16x4x2_t __ret; \
__builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
uint16x4_t val[2]
Definition: arm_neon.h:122
struct uint16x4x2_t uint16x4x2_t
#define vld2_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2x2_t __s1 = __p1; \
uint32x2x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
uint32x2x2_t __ret; \
__builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret; \
})
struct uint32x2x2_t uint32x2x2_t
uint32x2_t val[2]
Definition: arm_neon.h:130
#define vld2_lane_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x8x2_t __s1 = __p1; \
uint8x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8x2_t __ret; \
__builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint8x8x2_t uint8x8x2_t
uint8x8_t val[2]
Definition: arm_neon.h:114
#define vld2_p16 (   __p0)
Value:
__extension__ ({ \
poly16x4x2_t __ret; \
__builtin_neon_vld2_v(&__ret, __p0, 5); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct poly16x4x2_t poly16x4x2_t
#define vld2_p8 (   __p0)
Value:
__extension__ ({ \
poly8x8x2_t __ret; \
__builtin_neon_vld2_v(&__ret, __p0, 4); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly8x8x2_t poly8x8x2_t
#define vld2_s16 (   __p0)
Value:
__extension__ ({ \
int16x4x2_t __ret; \
__builtin_neon_vld2_v(&__ret, __p0, 1); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct int16x4x2_t int16x4x2_t
#define vld2_s32 (   __p0)
Value:
__extension__ ({ \
int32x2x2_t __ret; \
__builtin_neon_vld2_v(&__ret, __p0, 2); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret; \
})
struct int32x2x2_t int32x2x2_t
#define vld2_s64 (   __p0)
Value:
__extension__ ({ \
int64x1x2_t __ret; \
__builtin_neon_vld2_v(&__ret, __p0, 3); \
__ret; \
})
struct int64x1x2_t int64x1x2_t
#define vld2_s8 (   __p0)
Value:
__extension__ ({ \
int8x8x2_t __ret; \
__builtin_neon_vld2_v(&__ret, __p0, 0); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int8x8x2_t int8x8x2_t
#define vld2_u16 (   __p0)
Value:
__extension__ ({ \
uint16x4x2_t __ret; \
__builtin_neon_vld2_v(&__ret, __p0, 17); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct uint16x4x2_t uint16x4x2_t
#define vld2_u32 (   __p0)
Value:
__extension__ ({ \
uint32x2x2_t __ret; \
__builtin_neon_vld2_v(&__ret, __p0, 18); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret; \
})
struct uint32x2x2_t uint32x2x2_t
#define vld2_u64 (   __p0)
Value:
__extension__ ({ \
uint64x1x2_t __ret; \
__builtin_neon_vld2_v(&__ret, __p0, 19); \
__ret; \
})
struct uint64x1x2_t uint64x1x2_t
#define vld2_u8 (   __p0)
Value:
__extension__ ({ \
uint8x8x2_t __ret; \
__builtin_neon_vld2_v(&__ret, __p0, 16); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint8x8x2_t uint8x8x2_t
#define vld2q_f16 (   __p0)
Value:
__extension__ ({ \
__builtin_neon_vld2q_v(&__ret, __p0, 40); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct float16x8x2_t float16x8x2_t
#define vld2q_f32 (   __p0)
Value:
__extension__ ({ \
__builtin_neon_vld2q_v(&__ret, __p0, 41); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct float32x4x2_t float32x4x2_t
#define vld2q_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x8x2_t __s1 = __p1; \
float16x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
float16x8_t val[2]
Definition: arm_neon.h:150
struct float16x8x2_t float16x8x2_t
#define vld2q_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x4x2_t __s1 = __p1; \
float32x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct float32x4x2_t float32x4x2_t
float32x4_t val[2]
Definition: arm_neon.h:158
#define vld2q_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x8x2_t __s1 = __p1; \
poly16x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
poly16x8x2_t __ret; \
__builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly16x8x2_t poly16x8x2_t
poly16x8_t val[2]
Definition: arm_neon.h:184
#define vld2q_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8x2_t __s1 = __p1; \
int16x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8x2_t __ret; \
__builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int16x8x2_t int16x8x2_t
int16x8_t val[2]
Definition: arm_neon.h:94
#define vld2q_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4x2_t __s1 = __p1; \
int32x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
int32x4x2_t __ret; \
__builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct int32x4x2_t int32x4x2_t
int32x4_t val[2]
Definition: arm_neon.h:102
#define vld2q_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x8x2_t __s1 = __p1; \
uint16x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8x2_t __ret; \
__builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint16x8x2_t uint16x8x2_t
uint16x8_t val[2]
Definition: arm_neon.h:126
#define vld2q_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x4x2_t __s1 = __p1; \
uint32x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
uint32x4x2_t __ret; \
__builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
uint32x4_t val[2]
Definition: arm_neon.h:134
struct uint32x4x2_t uint32x4x2_t
#define vld2q_p16 (   __p0)
Value:
__extension__ ({ \
poly16x8x2_t __ret; \
__builtin_neon_vld2q_v(&__ret, __p0, 37); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly16x8x2_t poly16x8x2_t
#define vld2q_p8 (   __p0)
Value:
__extension__ ({ \
poly8x16x2_t __ret; \
__builtin_neon_vld2q_v(&__ret, __p0, 36); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly8x16x2_t poly8x16x2_t
#define vld2q_s16 (   __p0)
Value:
__extension__ ({ \
int16x8x2_t __ret; \
__builtin_neon_vld2q_v(&__ret, __p0, 33); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int16x8x2_t int16x8x2_t
#define vld2q_s32 (   __p0)
Value:
__extension__ ({ \
int32x4x2_t __ret; \
__builtin_neon_vld2q_v(&__ret, __p0, 34); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct int32x4x2_t int32x4x2_t
#define vld2q_s8 (   __p0)
Value:
__extension__ ({ \
int8x16x2_t __ret; \
__builtin_neon_vld2q_v(&__ret, __p0, 32); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int8x16x2_t int8x16x2_t
#define vld2q_u16 (   __p0)
Value:
__extension__ ({ \
uint16x8x2_t __ret; \
__builtin_neon_vld2q_v(&__ret, __p0, 49); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint16x8x2_t uint16x8x2_t
#define vld2q_u32 (   __p0)
Value:
__extension__ ({ \
uint32x4x2_t __ret; \
__builtin_neon_vld2q_v(&__ret, __p0, 50); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret; \
})
struct uint32x4x2_t uint32x4x2_t
#define vld2q_u8 (   __p0)
Value:
__extension__ ({ \
uint8x16x2_t __ret; \
__builtin_neon_vld2q_v(&__ret, __p0, 48); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint8x16x2_t uint8x16x2_t
#define vld3_dup_f16 (   __p0)
Value:
__extension__ ({ \
__builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct float16x4x3_t float16x4x3_t
#define vld3_dup_f32 (   __p0)
Value:
__extension__ ({ \
__builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret; \
})
struct float32x2x3_t float32x2x3_t
#define vld3_dup_p16 (   __p0)
Value:
__extension__ ({ \
poly16x4x3_t __ret; \
__builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct poly16x4x3_t poly16x4x3_t
#define vld3_dup_p8 (   __p0)
Value:
__extension__ ({ \
poly8x8x3_t __ret; \
__builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly8x8x3_t poly8x8x3_t
#define vld3_dup_s16 (   __p0)
Value:
__extension__ ({ \
int16x4x3_t __ret; \
__builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct int16x4x3_t int16x4x3_t
#define vld3_dup_s32 (   __p0)
Value:
__extension__ ({ \
int32x2x3_t __ret; \
__builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret; \
})
struct int32x2x3_t int32x2x3_t
#define vld3_dup_s64 (   __p0)
Value:
__extension__ ({ \
int64x1x3_t __ret; \
__builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
__ret; \
})
struct int64x1x3_t int64x1x3_t
#define vld3_dup_s8 (   __p0)
Value:
__extension__ ({ \
int8x8x3_t __ret; \
__builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int8x8x3_t int8x8x3_t
#define vld3_dup_u16 (   __p0)
Value:
__extension__ ({ \
uint16x4x3_t __ret; \
__builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct uint16x4x3_t uint16x4x3_t
#define vld3_dup_u32 (   __p0)
Value:
__extension__ ({ \
uint32x2x3_t __ret; \
__builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret; \
})
struct uint32x2x3_t uint32x2x3_t
#define vld3_dup_u64 (   __p0)
Value:
__extension__ ({ \
uint64x1x3_t __ret; \
__builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
__ret; \
})
struct uint64x1x3_t uint64x1x3_t
#define vld3_dup_u8 (   __p0)
Value:
__extension__ ({ \
uint8x8x3_t __ret; \
__builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint8x8x3_t uint8x8x3_t
#define vld3_f16 (   __p0)
Value:
__extension__ ({ \
__builtin_neon_vld3_v(&__ret, __p0, 8); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct float16x4x3_t float16x4x3_t
#define vld3_f32 (   __p0)
Value:
__extension__ ({ \
__builtin_neon_vld3_v(&__ret, __p0, 9); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret; \
})
struct float32x2x3_t float32x2x3_t
#define vld3_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x4x3_t __s1 = __p1; \
float16x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
float16x4_t val[3]
Definition: arm_neon.h:262
struct float16x4x3_t float16x4x3_t
#define vld3_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x2x3_t __s1 = __p1; \
float32x2x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret; \
})
struct float32x2x3_t float32x2x3_t
float32x2_t val[3]
Definition: arm_neon.h:270
#define vld3_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x4x3_t __s1 = __p1; \
poly16x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
poly16x4x3_t __ret; \
__builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct poly16x4x3_t poly16x4x3_t
poly16x4_t val[3]
Definition: arm_neon.h:296
#define vld3_lane_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x8x3_t __s1 = __p1; \
poly8x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8x3_t __ret; \
__builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly8x8x3_t poly8x8x3_t
poly8x8_t val[3]
Definition: arm_neon.h:288
#define vld3_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4x3_t __s1 = __p1; \
int16x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
int16x4x3_t __ret; \
__builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct int16x4x3_t int16x4x3_t
int16x4_t val[3]
Definition: arm_neon.h:206
#define vld3_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2x3_t __s1 = __p1; \
int32x2x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
int32x2x3_t __ret; \
__builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret; \
})
struct int32x2x3_t int32x2x3_t
int32x2_t val[3]
Definition: arm_neon.h:214
#define vld3_lane_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x8x3_t __s1 = __p1; \
int8x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8x3_t __ret; \
__builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
int8x8_t val[3]
Definition: arm_neon.h:198
struct int8x8x3_t int8x8x3_t
#define vld3_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4x3_t __s1 = __p1; \
uint16x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
uint16x4x3_t __ret; \
__builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
uint16x4_t val[3]
Definition: arm_neon.h:238
struct uint16x4x3_t uint16x4x3_t
#define vld3_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2x3_t __s1 = __p1; \
uint32x2x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
uint32x2x3_t __ret; \
__builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret; \
})
uint32x2_t val[3]
Definition: arm_neon.h:246
struct uint32x2x3_t uint32x2x3_t
#define vld3_lane_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x8x3_t __s1 = __p1; \
uint8x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8x3_t __ret; \
__builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint8x8x3_t uint8x8x3_t
uint8x8_t val[3]
Definition: arm_neon.h:230
#define vld3_p16 (   __p0)
Value:
__extension__ ({ \
poly16x4x3_t __ret; \
__builtin_neon_vld3_v(&__ret, __p0, 5); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct poly16x4x3_t poly16x4x3_t
#define vld3_p8 (   __p0)
Value:
__extension__ ({ \
poly8x8x3_t __ret; \
__builtin_neon_vld3_v(&__ret, __p0, 4); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly8x8x3_t poly8x8x3_t
#define vld3_s16 (   __p0)
Value:
__extension__ ({ \
int16x4x3_t __ret; \
__builtin_neon_vld3_v(&__ret, __p0, 1); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct int16x4x3_t int16x4x3_t
#define vld3_s32 (   __p0)
Value:
__extension__ ({ \
int32x2x3_t __ret; \
__builtin_neon_vld3_v(&__ret, __p0, 2); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret; \
})
struct int32x2x3_t int32x2x3_t
#define vld3_s64 (   __p0)
Value:
__extension__ ({ \
int64x1x3_t __ret; \
__builtin_neon_vld3_v(&__ret, __p0, 3); \
__ret; \
})
struct int64x1x3_t int64x1x3_t
#define vld3_s8 (   __p0)
Value:
__extension__ ({ \
int8x8x3_t __ret; \
__builtin_neon_vld3_v(&__ret, __p0, 0); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int8x8x3_t int8x8x3_t
#define vld3_u16 (   __p0)
Value:
__extension__ ({ \
uint16x4x3_t __ret; \
__builtin_neon_vld3_v(&__ret, __p0, 17); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct uint16x4x3_t uint16x4x3_t
#define vld3_u32 (   __p0)
Value:
__extension__ ({ \
uint32x2x3_t __ret; \
__builtin_neon_vld3_v(&__ret, __p0, 18); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret; \
})
struct uint32x2x3_t uint32x2x3_t
#define vld3_u64 (   __p0)
Value:
__extension__ ({ \
uint64x1x3_t __ret; \
__builtin_neon_vld3_v(&__ret, __p0, 19); \
__ret; \
})
struct uint64x1x3_t uint64x1x3_t
#define vld3_u8 (   __p0)
Value:
__extension__ ({ \
uint8x8x3_t __ret; \
__builtin_neon_vld3_v(&__ret, __p0, 16); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint8x8x3_t uint8x8x3_t
#define vld3q_f16 (   __p0)
Value:
__extension__ ({ \
float16x8x3_t __ret; \
__builtin_neon_vld3q_v(&__ret, __p0, 40); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct float16x8x3_t float16x8x3_t
#define vld3q_f32 (   __p0)
Value:
__extension__ ({ \
float32x4x3_t __ret; \
__builtin_neon_vld3q_v(&__ret, __p0, 41); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct float32x4x3_t float32x4x3_t
#define vld3q_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x8x3_t __s1 = __p1; \
float16x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
float16x8x3_t __ret; \
__builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
float16x8_t val[3]
Definition: arm_neon.h:266
struct float16x8x3_t float16x8x3_t
#define vld3q_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x4x3_t __s1 = __p1; \
float32x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
float32x4x3_t __ret; \
__builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct float32x4x3_t float32x4x3_t
float32x4_t val[3]
Definition: arm_neon.h:274
#define vld3q_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x8x3_t __s1 = __p1; \
poly16x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
poly16x8x3_t __ret; \
__builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly16x8x3_t poly16x8x3_t
poly16x8_t val[3]
Definition: arm_neon.h:300
#define vld3q_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8x3_t __s1 = __p1; \
int16x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8x3_t __ret; \
__builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int16x8x3_t int16x8x3_t
int16x8_t val[3]
Definition: arm_neon.h:210
#define vld3q_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4x3_t __s1 = __p1; \
int32x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
int32x4x3_t __ret; \
__builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
int32x4_t val[3]
Definition: arm_neon.h:218
struct int32x4x3_t int32x4x3_t
#define vld3q_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x8x3_t __s1 = __p1; \
uint16x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8x3_t __ret; \
__builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
uint16x8_t val[3]
Definition: arm_neon.h:242
struct uint16x8x3_t uint16x8x3_t
#define vld3q_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x4x3_t __s1 = __p1; \
uint32x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
uint32x4x3_t __ret; \
__builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct uint32x4x3_t uint32x4x3_t
uint32x4_t val[3]
Definition: arm_neon.h:250
#define vld3q_p16 (   __p0)
Value:
__extension__ ({ \
poly16x8x3_t __ret; \
__builtin_neon_vld3q_v(&__ret, __p0, 37); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly16x8x3_t poly16x8x3_t
#define vld3q_p8 (   __p0)
Value:
__extension__ ({ \
poly8x16x3_t __ret; \
__builtin_neon_vld3q_v(&__ret, __p0, 36); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly8x16x3_t poly8x16x3_t
#define vld3q_s16 (   __p0)
Value:
__extension__ ({ \
int16x8x3_t __ret; \
__builtin_neon_vld3q_v(&__ret, __p0, 33); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int16x8x3_t int16x8x3_t
#define vld3q_s32 (   __p0)
Value:
__extension__ ({ \
int32x4x3_t __ret; \
__builtin_neon_vld3q_v(&__ret, __p0, 34); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct int32x4x3_t int32x4x3_t
#define vld3q_s8 (   __p0)
Value:
__extension__ ({ \
int8x16x3_t __ret; \
__builtin_neon_vld3q_v(&__ret, __p0, 32); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int8x16x3_t int8x16x3_t
#define vld3q_u16 (   __p0)
Value:
__extension__ ({ \
uint16x8x3_t __ret; \
__builtin_neon_vld3q_v(&__ret, __p0, 49); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint16x8x3_t uint16x8x3_t
#define vld3q_u32 (   __p0)
Value:
__extension__ ({ \
uint32x4x3_t __ret; \
__builtin_neon_vld3q_v(&__ret, __p0, 50); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret; \
})
struct uint32x4x3_t uint32x4x3_t
#define vld3q_u8 (   __p0)
Value:
__extension__ ({ \
uint8x16x3_t __ret; \
__builtin_neon_vld3q_v(&__ret, __p0, 48); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint8x16x3_t uint8x16x3_t
#define vld4_dup_f16 (   __p0)
Value:
__extension__ ({ \
float16x4x4_t __ret; \
__builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct float16x4x4_t float16x4x4_t
#define vld4_dup_f32 (   __p0)
Value:
__extension__ ({ \
float32x2x4_t __ret; \
__builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
__ret; \
})
struct float32x2x4_t float32x2x4_t
#define vld4_dup_p16 (   __p0)
Value:
__extension__ ({ \
poly16x4x4_t __ret; \
__builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct poly16x4x4_t poly16x4x4_t
#define vld4_dup_p8 (   __p0)
Value:
__extension__ ({ \
poly8x8x4_t __ret; \
__builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly8x8x4_t poly8x8x4_t
#define vld4_dup_s16 (   __p0)
Value:
__extension__ ({ \
int16x4x4_t __ret; \
__builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct int16x4x4_t int16x4x4_t
#define vld4_dup_s32 (   __p0)
Value:
__extension__ ({ \
int32x2x4_t __ret; \
__builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
__ret; \
})
struct int32x2x4_t int32x2x4_t
#define vld4_dup_s64 (   __p0)
Value:
__extension__ ({ \
int64x1x4_t __ret; \
__builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
__ret; \
})
struct int64x1x4_t int64x1x4_t
#define vld4_dup_s8 (   __p0)
Value:
__extension__ ({ \
int8x8x4_t __ret; \
__builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int8x8x4_t int8x8x4_t
#define vld4_dup_u16 (   __p0)
Value:
__extension__ ({ \
uint16x4x4_t __ret; \
__builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct uint16x4x4_t uint16x4x4_t
#define vld4_dup_u32 (   __p0)
Value:
__extension__ ({ \
uint32x2x4_t __ret; \
__builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
__ret; \
})
struct uint32x2x4_t uint32x2x4_t
#define vld4_dup_u64 (   __p0)
Value:
__extension__ ({ \
uint64x1x4_t __ret; \
__builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
__ret; \
})
struct uint64x1x4_t uint64x1x4_t
#define vld4_dup_u8 (   __p0)
Value:
__extension__ ({ \
uint8x8x4_t __ret; \
__builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint8x8x4_t uint8x8x4_t
#define vld4_f16 (   __p0)
Value:
__extension__ ({ \
float16x4x4_t __ret; \
__builtin_neon_vld4_v(&__ret, __p0, 8); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct float16x4x4_t float16x4x4_t
#define vld4_f32 (   __p0)
Value:
__extension__ ({ \
float32x2x4_t __ret; \
__builtin_neon_vld4_v(&__ret, __p0, 9); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
__ret; \
})
struct float32x2x4_t float32x2x4_t
#define vld4_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x4x4_t __s1 = __p1; \
float16x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
float16x4x4_t __ret; \
__builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
float16x4_t val[4]
Definition: arm_neon.h:378
struct float16x4x4_t float16x4x4_t
#define vld4_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x2x4_t __s1 = __p1; \
float32x2x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
float32x2x4_t __ret; \
__builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
__ret; \
})
struct float32x2x4_t float32x2x4_t
float32x2_t val[4]
Definition: arm_neon.h:386
#define vld4_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x4x4_t __s1 = __p1; \
poly16x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
poly16x4x4_t __ret; \
__builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct poly16x4x4_t poly16x4x4_t
poly16x4_t val[4]
Definition: arm_neon.h:412
#define vld4_lane_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x8x4_t __s1 = __p1; \
poly8x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8x4_t __ret; \
__builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly8x8x4_t poly8x8x4_t
poly8x8_t val[4]
Definition: arm_neon.h:404
#define vld4_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4x4_t __s1 = __p1; \
int16x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
int16x4x4_t __ret; \
__builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct int16x4x4_t int16x4x4_t
int16x4_t val[4]
Definition: arm_neon.h:322
#define vld4_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2x4_t __s1 = __p1; \
int32x2x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
int32x2x4_t __ret; \
__builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
__ret; \
})
struct int32x2x4_t int32x2x4_t
int32x2_t val[4]
Definition: arm_neon.h:330
#define vld4_lane_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x8x4_t __s1 = __p1; \
int8x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8x4_t __ret; \
__builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
int8x8_t val[4]
Definition: arm_neon.h:314
struct int8x8x4_t int8x8x4_t
#define vld4_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4x4_t __s1 = __p1; \
uint16x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
uint16x4x4_t __ret; \
__builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct uint16x4x4_t uint16x4x4_t
uint16x4_t val[4]
Definition: arm_neon.h:354
#define vld4_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2x4_t __s1 = __p1; \
uint32x2x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
uint32x2x4_t __ret; \
__builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
__ret; \
})
struct uint32x2x4_t uint32x2x4_t
uint32x2_t val[4]
Definition: arm_neon.h:362
#define vld4_lane_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x8x4_t __s1 = __p1; \
uint8x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8x4_t __ret; \
__builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
uint8x8_t val[4]
Definition: arm_neon.h:346
struct uint8x8x4_t uint8x8x4_t
#define vld4_p16 (   __p0)
Value:
__extension__ ({ \
poly16x4x4_t __ret; \
__builtin_neon_vld4_v(&__ret, __p0, 5); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct poly16x4x4_t poly16x4x4_t
#define vld4_p8 (   __p0)
Value:
__extension__ ({ \
poly8x8x4_t __ret; \
__builtin_neon_vld4_v(&__ret, __p0, 4); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly8x8x4_t poly8x8x4_t
#define vld4_s16 (   __p0)
Value:
__extension__ ({ \
int16x4x4_t __ret; \
__builtin_neon_vld4_v(&__ret, __p0, 1); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct int16x4x4_t int16x4x4_t
#define vld4_s32 (   __p0)
Value:
__extension__ ({ \
int32x2x4_t __ret; \
__builtin_neon_vld4_v(&__ret, __p0, 2); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
__ret; \
})
struct int32x2x4_t int32x2x4_t
#define vld4_s64 (   __p0)
Value:
__extension__ ({ \
int64x1x4_t __ret; \
__builtin_neon_vld4_v(&__ret, __p0, 3); \
__ret; \
})
struct int64x1x4_t int64x1x4_t
#define vld4_s8 (   __p0)
Value:
__extension__ ({ \
int8x8x4_t __ret; \
__builtin_neon_vld4_v(&__ret, __p0, 0); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int8x8x4_t int8x8x4_t
#define vld4_u16 (   __p0)
Value:
__extension__ ({ \
uint16x4x4_t __ret; \
__builtin_neon_vld4_v(&__ret, __p0, 17); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct uint16x4x4_t uint16x4x4_t
#define vld4_u32 (   __p0)
Value:
__extension__ ({ \
uint32x2x4_t __ret; \
__builtin_neon_vld4_v(&__ret, __p0, 18); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
__ret; \
})
struct uint32x2x4_t uint32x2x4_t
#define vld4_u64 (   __p0)
Value:
__extension__ ({ \
uint64x1x4_t __ret; \
__builtin_neon_vld4_v(&__ret, __p0, 19); \
__ret; \
})
struct uint64x1x4_t uint64x1x4_t
#define vld4_u8 (   __p0)
Value:
__extension__ ({ \
uint8x8x4_t __ret; \
__builtin_neon_vld4_v(&__ret, __p0, 16); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint8x8x4_t uint8x8x4_t
#define vld4q_f16 (   __p0)
Value:
__extension__ ({ \
float16x8x4_t __ret; \
__builtin_neon_vld4q_v(&__ret, __p0, 40); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct float16x8x4_t float16x8x4_t
#define vld4q_f32 (   __p0)
Value:
__extension__ ({ \
__builtin_neon_vld4q_v(&__ret, __p0, 41); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct float32x4x4_t float32x4x4_t
#define vld4q_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x8x4_t __s1 = __p1; \
float16x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
float16x8_t val[4]
Definition: arm_neon.h:382
struct float16x8x4_t float16x8x4_t
#define vld4q_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x4x4_t __s1 = __p1; \
float32x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
float32x4_t val[4]
Definition: arm_neon.h:390
struct float32x4x4_t float32x4x4_t
#define vld4q_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x8x4_t __s1 = __p1; \
poly16x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
poly16x8x4_t __ret; \
__builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
poly16x8_t val[4]
Definition: arm_neon.h:416
struct poly16x8x4_t poly16x8x4_t
#define vld4q_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8x4_t __s1 = __p1; \
int16x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8x4_t __ret; \
__builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int16x8x4_t int16x8x4_t
int16x8_t val[4]
Definition: arm_neon.h:326
#define vld4q_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4x4_t __s1 = __p1; \
int32x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
int32x4x4_t __ret; \
__builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct int32x4x4_t int32x4x4_t
int32x4_t val[4]
Definition: arm_neon.h:334
#define vld4q_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x8x4_t __s1 = __p1; \
uint16x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8x4_t __ret; \
__builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
uint16x8_t val[4]
Definition: arm_neon.h:358
struct uint16x8x4_t uint16x8x4_t
#define vld4q_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x4x4_t __s1 = __p1; \
uint32x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
uint32x4x4_t __ret; \
__builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
uint32x4_t val[4]
Definition: arm_neon.h:366
struct uint32x4x4_t uint32x4x4_t
#define vld4q_p16 (   __p0)
Value:
__extension__ ({ \
poly16x8x4_t __ret; \
__builtin_neon_vld4q_v(&__ret, __p0, 37); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly16x8x4_t poly16x8x4_t
#define vld4q_p8 (   __p0)
Value:
__extension__ ({ \
poly8x16x4_t __ret; \
__builtin_neon_vld4q_v(&__ret, __p0, 36); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct poly8x16x4_t poly8x16x4_t
#define vld4q_s16 (   __p0)
Value:
__extension__ ({ \
int16x8x4_t __ret; \
__builtin_neon_vld4q_v(&__ret, __p0, 33); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int16x8x4_t int16x8x4_t
#define vld4q_s32 (   __p0)
Value:
__extension__ ({ \
int32x4x4_t __ret; \
__builtin_neon_vld4q_v(&__ret, __p0, 34); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct int32x4x4_t int32x4x4_t
#define vld4q_s8 (   __p0)
Value:
__extension__ ({ \
int8x16x4_t __ret; \
__builtin_neon_vld4q_v(&__ret, __p0, 32); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct int8x16x4_t int8x16x4_t
#define vld4q_u16 (   __p0)
Value:
__extension__ ({ \
uint16x8x4_t __ret; \
__builtin_neon_vld4q_v(&__ret, __p0, 49); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint16x8x4_t uint16x8x4_t
#define vld4q_u32 (   __p0)
Value:
__extension__ ({ \
uint32x4x4_t __ret; \
__builtin_neon_vld4q_v(&__ret, __p0, 50); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
__ret; \
})
struct uint32x4x4_t uint32x4x4_t
#define vld4q_u8 (   __p0)
Value:
__extension__ ({ \
uint8x16x4_t __ret; \
__builtin_neon_vld4q_v(&__ret, __p0, 48); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
struct uint8x16x4_t uint8x16x4_t
#define vmla_lane_f32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
float32x2_t __s0 = __p0; \
float32x2_t __s1 = __p1; \
float32x2_t __s2 = __p2; \
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
float32x2_t __ret; \
__ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vmla_lane_s16 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __s2 = __p2; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vmla_lane_s32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __s2 = __p2; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
int32x2_t __ret; \
__ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vmla_lane_u16 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __s1 = __p1; \
uint16x4_t __s2 = __p2; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vmla_lane_u32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __s1 = __p1; \
uint32x2_t __s2 = __p2; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
uint32x2_t __ret; \
__ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vmlal_lane_s16 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __s2 = __p2; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1)
Definition: arm_neon.h:14892
#define vmlal_lane_s32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __s2 = __p2; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
int64x2_t __ret; \
__ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1)
Definition: arm_neon.h:14870
#define vmlal_lane_u16 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint16x4_t __s1 = __p1; \
uint16x4_t __s2 = __p2; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1)
Definition: arm_neon.h:14826
#define vmlal_lane_u32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint32x2_t __s1 = __p1; \
uint32x2_t __s2 = __p2; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
uint64x2_t __ret; \
__ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1)
Definition: arm_neon.h:14804
#define vmlaq_lane_f32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
float32x4_t __s0 = __p0; \
float32x4_t __s1 = __p1; \
float32x2_t __s2 = __p2; \
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
float32x4_t __ret; \
__ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vmlaq_lane_s16 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __s1 = __p1; \
int16x4_t __s2 = __p2; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vmlaq_lane_s32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __s1 = __p1; \
int32x2_t __s2 = __p2; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
int32x4_t __ret; \
__ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vmlaq_lane_u16 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __s1 = __p1; \
uint16x4_t __s2 = __p2; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vmlaq_lane_u32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __s1 = __p1; \
uint32x2_t __s2 = __p2; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
uint32x4_t __ret; \
__ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vmls_lane_f32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
float32x2_t __s0 = __p0; \
float32x2_t __s1 = __p1; \
float32x2_t __s2 = __p2; \
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
float32x2_t __ret; \
__ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vmls_lane_s16 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __s2 = __p2; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vmls_lane_s32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __s2 = __p2; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
int32x2_t __ret; \
__ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vmls_lane_u16 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __s1 = __p1; \
uint16x4_t __s2 = __p2; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vmls_lane_u32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __s1 = __p1; \
uint32x2_t __s2 = __p2; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
uint32x2_t __ret; \
__ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vmlsl_lane_s16 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __s2 = __p2; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1)
Definition: arm_neon.h:14892
#define vmlsl_lane_s32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __s2 = __p2; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
int64x2_t __ret; \
__ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1)
Definition: arm_neon.h:14870
#define vmlsl_lane_u16 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint16x4_t __s1 = __p1; \
uint16x4_t __s2 = __p2; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1)
Definition: arm_neon.h:14826
#define vmlsl_lane_u32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint32x2_t __s1 = __p1; \
uint32x2_t __s2 = __p2; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
uint64x2_t __ret; \
__ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1)
Definition: arm_neon.h:14804
#define vmlsq_lane_f32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
float32x4_t __s0 = __p0; \
float32x4_t __s1 = __p1; \
float32x2_t __s2 = __p2; \
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
float32x4_t __ret; \
__ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vmlsq_lane_s16 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __s1 = __p1; \
int16x4_t __s2 = __p2; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vmlsq_lane_s32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __s1 = __p1; \
int32x2_t __s2 = __p2; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
int32x4_t __ret; \
__ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vmlsq_lane_u16 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __s1 = __p1; \
uint16x4_t __s2 = __p2; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vmlsq_lane_u32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __s1 = __p1; \
uint32x2_t __s2 = __p2; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
uint32x4_t __ret; \
__ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vmov_n_f16 (   __p0)
Value:
__extension__ ({ \
float16_t __s0 = __p0; \
float16x4_t __ret; \
__ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__fp16 float16_t
Definition: arm_neon.h:34
#define vmovq_n_f16 (   __p0)
Value:
__extension__ ({ \
float16_t __s0 = __p0; \
float16x8_t __ret; \
__ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
__fp16 float16_t
Definition: arm_neon.h:34
#define vmul_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x2_t __s0 = __p0; \
float32x2_t __s1 = __p1; \
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
float32x2_t __ret; \
__ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vmul_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vmul_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __ret; \
__ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vmul_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __s1 = __p1; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vmul_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __s1 = __p1; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint32x2_t __ret; \
__ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vmull_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1)
Definition: arm_neon.h:14892
#define vmull_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int64x2_t __ret; \
__ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1)
Definition: arm_neon.h:14870
#define vmull_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __s1 = __p1; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1)
Definition: arm_neon.h:14826
#define vmull_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __s1 = __p1; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint64x2_t __ret; \
__ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1)
Definition: arm_neon.h:14804
#define vmulq_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x4_t __s0 = __p0; \
float32x2_t __s1 = __p1; \
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
float32x4_t __ret; \
__ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vmulq_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vmulq_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x4_t __ret; \
__ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vmulq_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x4_t __s1 = __p1; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vmulq_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x2_t __s1 = __p1; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint32x4_t __ret; \
__ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vqdmlal_lane_s16 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __s2 = __p2; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2)
Definition: arm_neon.h:17119
#define vqdmlal_lane_s32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __s2 = __p2; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
int64x2_t __ret; \
__ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2)
Definition: arm_neon.h:17096
#define vqdmlsl_lane_s16 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __s2 = __p2; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2)
Definition: arm_neon.h:17257
#define vqdmlsl_lane_s32 (   __p0,
  __p1,
  __p2,
  __p3 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __s2 = __p2; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
int64x2_t __ret; \
__ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2)
Definition: arm_neon.h:17234
#define vqdmulh_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1)
Definition: arm_neon.h:17437
#define vqdmulh_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __ret; \
__ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1)
Definition: arm_neon.h:17415
#define vqdmulhq_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1)
Definition: arm_neon.h:17393
#define vqdmulhq_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x4_t __ret; \
__ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1)
Definition: arm_neon.h:17371
#define vqdmull_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1)
Definition: arm_neon.h:17629
#define vqdmull_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int64x2_t __ret; \
__ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1)
Definition: arm_neon.h:17607
#define vqrdmulh_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1)
Definition: arm_neon.h:18086
#define vqrdmulh_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __ret; \
__ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1)
Definition: arm_neon.h:18064
#define vqrdmulhq_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1)
Definition: arm_neon.h:18042
#define vqrdmulhq_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x4_t __ret; \
__ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1)
Definition: arm_neon.h:18020
#define vqrshrn_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqrshrn_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vqrshrn_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vqrshrn_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqrshrn_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vqrshrn_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vqrshrun_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqrshrun_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vqrshrun_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vqshl_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vqshl_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vqshl_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x1_t __s0 = __p0; \
int64x1_t __ret; \
__ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
__ret; \
})
#define vqshl_n_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqshl_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vqshl_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vqshl_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x1_t __s0 = __p0; \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
__ret; \
})
#define vqshl_n_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqshlq_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqshlq_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vqshlq_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vqshlq_n_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x16_t __s0 = __p0; \
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __ret; \
__ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqshlq_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqshlq_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vqshlq_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vqshlq_n_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x16_t __s0 = __p0; \
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqshlu_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vqshlu_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vqshlu_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x1_t __s0 = __p0; \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
__ret; \
})
#define vqshlu_n_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqshluq_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqshluq_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vqshluq_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vqshluq_n_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x16_t __s0 = __p0; \
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqshrn_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqshrn_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vqshrn_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vqshrn_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqshrn_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vqshrn_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vqshrun_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vqshrun_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vqshrun_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vrshr_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vrshr_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vrshr_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x1_t __s0 = __p0; \
int64x1_t __ret; \
__ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
__ret; \
})
#define vrshr_n_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vrshr_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vrshr_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vrshr_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x1_t __s0 = __p0; \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
__ret; \
})
#define vrshr_n_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vrshrn_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vrshrn_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vrshrn_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vrshrn_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vrshrn_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vrshrn_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vrshrq_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vrshrq_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vrshrq_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vrshrq_n_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x16_t __s0 = __p0; \
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __ret; \
__ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vrshrq_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vrshrq_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vrshrq_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vrshrq_n_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x16_t __s0 = __p0; \
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vrsra_n_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vrsra_n_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vrsra_n_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64x1_t __s0 = __p0; \
int64x1_t __s1 = __p1; \
int64x1_t __ret; \
__ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
__ret; \
})
#define vrsra_n_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8x8_t __s1 = __p1; \
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vrsra_n_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __s1 = __p1; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vrsra_n_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __s1 = __p1; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vrsra_n_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64x1_t __s0 = __p0; \
uint64x1_t __s1 = __p1; \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
__ret; \
})
#define vrsra_n_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint8x8_t __s1 = __p1; \
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vrsraq_n_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __s1 = __p1; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vrsraq_n_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __s1 = __p1; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vrsraq_n_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __s1 = __p1; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vrsraq_n_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x16_t __s0 = __p0; \
int8x16_t __s1 = __p1; \
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __ret; \
__ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vrsraq_n_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __s1 = __p1; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vrsraq_n_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __s1 = __p1; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vrsraq_n_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64x2_t __s1 = __p1; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vrsraq_n_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x16_t __s0 = __p0; \
uint8x16_t __s1 = __p1; \
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vset_lane_f16 (   __p0_247,
  __p1_247,
  __p2_247 
)
Value:
__extension__ ({ \
float16_t __s0_247 = __p0_247; \
float16x4_t __s1_247 = __p1_247; \
float16x4_t __rev1_247; __rev1_247 = __builtin_shufflevector(__s1_247, __s1_247, 3, 2, 1, 0); \
float16x4_t __ret_247; \
float16_t __reint_247 = __s0_247; \
float16x4_t __reint1_247 = __rev1_247; \
int16x4_t __reint2_247 = __noswap_vset_lane_s16(*(int16_t *) &__reint_247, *(int16x4_t *) &__reint1_247, __p2_247); \
__ret_247 = *(float16x4_t *) &__reint2_247; \
__ret_247 = __builtin_shufflevector(__ret_247, __ret_247, 3, 2, 1, 0); \
__ret_247; \
})
#define __noswap_vset_lane_s16(__p0, __p1, __p2)
Definition: arm_neon.h:22762
__fp16 float16_t
Definition: arm_neon.h:34
#define vset_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32_t __s0 = __p0; \
float32x2_t __s1 = __p1; \
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
float32x2_t __ret; \
__ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
float float32_t
Definition: arm_neon.h:33
#define vset_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16_t __s0 = __p0; \
poly16x4_t __s1 = __p1; \
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
poly16x4_t __ret; \
__ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
int16_t poly16_t
Definition: arm_neon.h:46
#define vset_lane_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8_t __s0 = __p0; \
poly8x8_t __s1 = __p1; \
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8_t __ret; \
__ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
int8_t poly8_t
Definition: arm_neon.h:45
#define vset_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vset_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vset_lane_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64_t __s0 = __p0; \
int64x1_t __s1 = __p1; \
int64x1_t __ret; \
__ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
__ret; \
})
#define vset_lane_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8_t __s0 = __p0; \
int8x8_t __s1 = __p1; \
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vset_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16_t __s0 = __p0; \
uint16x4_t __s1 = __p1; \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vset_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32_t __s0 = __p0; \
uint32x2_t __s1 = __p1; \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vset_lane_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64_t __s0 = __p0; \
uint64x1_t __s1 = __p1; \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
__ret; \
})
#define vset_lane_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8_t __s0 = __p0; \
uint8x8_t __s1 = __p1; \
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsetq_lane_f16 (   __p0_249,
  __p1_249,
  __p2_249 
)
Value:
__extension__ ({ \
float16_t __s0_249 = __p0_249; \
float16x8_t __s1_249 = __p1_249; \
float16x8_t __rev1_249; __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 7, 6, 5, 4, 3, 2, 1, 0); \
float16x8_t __ret_249; \
float16_t __reint_249 = __s0_249; \
float16x8_t __reint1_249 = __rev1_249; \
int16x8_t __reint2_249 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_249, *(int16x8_t *) &__reint1_249, __p2_249); \
__ret_249 = *(float16x8_t *) &__reint2_249; \
__ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret_249; \
})
__fp16 float16_t
Definition: arm_neon.h:34
#define __noswap_vsetq_lane_s16(__p0, __p1, __p2)
Definition: arm_neon.h:22523
#define vsetq_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32_t __s0 = __p0; \
float32x4_t __s1 = __p1; \
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
float32x4_t __ret; \
__ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
float float32_t
Definition: arm_neon.h:33
#define vsetq_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16_t __s0 = __p0; \
poly16x8_t __s1 = __p1; \
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
poly16x8_t __ret; \
__ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
int16_t poly16_t
Definition: arm_neon.h:46
#define vsetq_lane_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8_t __s0 = __p0; \
poly8x16_t __s1 = __p1; \
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x16_t __ret; \
__ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
int8_t poly8_t
Definition: arm_neon.h:45
#define vsetq_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16_t __s0 = __p0; \
int16x8_t __s1 = __p1; \
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsetq_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32_t __s0 = __p0; \
int32x4_t __s1 = __p1; \
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsetq_lane_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64_t __s0 = __p0; \
int64x2_t __s1 = __p1; \
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vsetq_lane_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8_t __s0 = __p0; \
int8x16_t __s1 = __p1; \
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __ret; \
__ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsetq_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16_t __s0 = __p0; \
uint16x8_t __s1 = __p1; \
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsetq_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32_t __s0 = __p0; \
uint32x4_t __s1 = __p1; \
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsetq_lane_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64_t __s0 = __p0; \
uint64x2_t __s1 = __p1; \
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vsetq_lane_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8_t __s0 = __p0; \
uint8x16_t __s1 = __p1; \
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshl_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vshl_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vshl_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x1_t __s0 = __p0; \
int64x1_t __ret; \
__ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
__ret; \
})
#define vshl_n_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshl_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vshl_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vshl_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x1_t __s0 = __p0; \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
__ret; \
})
#define vshl_n_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshll_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vshll_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vshll_n_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshll_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vshll_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vshll_n_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshlq_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshlq_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vshlq_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vshlq_n_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x16_t __s0 = __p0; \
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __ret; \
__ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshlq_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshlq_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vshlq_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vshlq_n_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x16_t __s0 = __p0; \
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshr_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vshr_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vshr_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x1_t __s0 = __p0; \
int64x1_t __ret; \
__ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
__ret; \
})
#define vshr_n_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshr_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vshr_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vshr_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x1_t __s0 = __p0; \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
__ret; \
})
#define vshr_n_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshrn_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshrn_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vshrn_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vshrn_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshrn_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vshrn_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vshrq_n_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshrq_n_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vshrq_n_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vshrq_n_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x16_t __s0 = __p0; \
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __ret; \
__ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshrq_n_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vshrq_n_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vshrq_n_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vshrq_n_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x16_t __s0 = __p0; \
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsli_n_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x4_t __s0 = __p0; \
poly16x4_t __s1 = __p1; \
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
poly16x4_t __ret; \
__ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsli_n_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x8_t __s0 = __p0; \
poly8x8_t __s1 = __p1; \
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8_t __ret; \
__ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsli_n_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsli_n_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vsli_n_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64x1_t __s0 = __p0; \
int64x1_t __s1 = __p1; \
int64x1_t __ret; \
__ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
__ret; \
})
#define vsli_n_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8x8_t __s1 = __p1; \
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsli_n_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __s1 = __p1; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsli_n_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __s1 = __p1; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vsli_n_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64x1_t __s0 = __p0; \
uint64x1_t __s1 = __p1; \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
__ret; \
})
#define vsli_n_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint8x8_t __s1 = __p1; \
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsliq_n_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x8_t __s0 = __p0; \
poly16x8_t __s1 = __p1; \
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
poly16x8_t __ret; \
__ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsliq_n_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x16_t __s0 = __p0; \
poly8x16_t __s1 = __p1; \
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x16_t __ret; \
__ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsliq_n_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __s1 = __p1; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsliq_n_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __s1 = __p1; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsliq_n_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __s1 = __p1; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vsliq_n_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x16_t __s0 = __p0; \
int8x16_t __s1 = __p1; \
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __ret; \
__ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsliq_n_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __s1 = __p1; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsliq_n_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __s1 = __p1; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsliq_n_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64x2_t __s1 = __p1; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vsliq_n_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x16_t __s0 = __p0; \
uint8x16_t __s1 = __p1; \
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsra_n_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsra_n_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vsra_n_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64x1_t __s0 = __p0; \
int64x1_t __s1 = __p1; \
int64x1_t __ret; \
__ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
__ret; \
})
#define vsra_n_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8x8_t __s1 = __p1; \
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsra_n_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __s1 = __p1; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsra_n_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __s1 = __p1; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vsra_n_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64x1_t __s0 = __p0; \
uint64x1_t __s1 = __p1; \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
__ret; \
})
#define vsra_n_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint8x8_t __s1 = __p1; \
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsraq_n_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __s1 = __p1; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsraq_n_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __s1 = __p1; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsraq_n_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __s1 = __p1; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vsraq_n_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x16_t __s0 = __p0; \
int8x16_t __s1 = __p1; \
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __ret; \
__ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsraq_n_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __s1 = __p1; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsraq_n_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __s1 = __p1; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsraq_n_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64x2_t __s1 = __p1; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vsraq_n_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x16_t __s0 = __p0; \
uint8x16_t __s1 = __p1; \
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsri_n_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x4_t __s0 = __p0; \
poly16x4_t __s1 = __p1; \
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
poly16x4_t __ret; \
__ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsri_n_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x8_t __s0 = __p0; \
poly8x8_t __s1 = __p1; \
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8_t __ret; \
__ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsri_n_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4_t __s0 = __p0; \
int16x4_t __s1 = __p1; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int16x4_t __ret; \
__ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsri_n_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2_t __s0 = __p0; \
int32x2_t __s1 = __p1; \
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int32x2_t __ret; \
__ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vsri_n_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64x1_t __s0 = __p0; \
int64x1_t __s1 = __p1; \
int64x1_t __ret; \
__ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
__ret; \
})
#define vsri_n_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x8_t __s0 = __p0; \
int8x8_t __s1 = __p1; \
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret; \
__ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsri_n_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4_t __s0 = __p0; \
uint16x4_t __s1 = __p1; \
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint16x4_t __ret; \
__ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsri_n_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2_t __s0 = __p0; \
uint32x2_t __s1 = __p1; \
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint32x2_t __ret; \
__ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vsri_n_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64x1_t __s0 = __p0; \
uint64x1_t __s1 = __p1; \
uint64x1_t __ret; \
__ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
__ret; \
})
#define vsri_n_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x8_t __s0 = __p0; \
uint8x8_t __s1 = __p1; \
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret; \
__ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsriq_n_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x8_t __s0 = __p0; \
poly16x8_t __s1 = __p1; \
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
poly16x8_t __ret; \
__ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsriq_n_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x16_t __s0 = __p0; \
poly8x16_t __s1 = __p1; \
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x16_t __ret; \
__ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsriq_n_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8_t __s0 = __p0; \
int16x8_t __s1 = __p1; \
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x8_t __ret; \
__ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsriq_n_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4_t __s0 = __p0; \
int32x4_t __s1 = __p1; \
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
int32x4_t __ret; \
__ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsriq_n_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64x2_t __s0 = __p0; \
int64x2_t __s1 = __p1; \
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
int64x2_t __ret; \
__ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vsriq_n_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x16_t __s0 = __p0; \
int8x16_t __s1 = __p1; \
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __ret; \
__ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsriq_n_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x8_t __s0 = __p0; \
uint16x8_t __s1 = __p1; \
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x8_t __ret; \
__ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vsriq_n_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x4_t __s0 = __p0; \
uint32x4_t __s1 = __p1; \
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
uint32x4_t __ret; \
__ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
__ret; \
})
#define vsriq_n_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64x2_t __s0 = __p0; \
uint64x2_t __s1 = __p1; \
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
uint64x2_t __ret; \
__ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#define vsriq_n_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x16_t __s0 = __p0; \
uint8x16_t __s1 = __p1; \
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __ret; \
__ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret; \
})
#define vst1_f16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float16x4_t __s1 = __p1; \
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
__builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \
})
#define vst1_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x2_t __s1 = __p1; \
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \
})
#define vst1_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x4_t __s1 = __p1; \
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
})
#define vst1_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x2_t __s1 = __p1; \
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
})
#define vst1_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x4_t __s1 = __p1; \
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
})
#define vst1_lane_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x8_t __s1 = __p1; \
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
})
#define vst1_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4_t __s1 = __p1; \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
})
#define vst1_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2_t __s1 = __p1; \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
})
#define vst1_lane_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64x1_t __s1 = __p1; \
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
})
#define vst1_lane_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x8_t __s1 = __p1; \
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
})
#define vst1_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4_t __s1 = __p1; \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
})
#define vst1_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2_t __s1 = __p1; \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
})
#define vst1_lane_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64x1_t __s1 = __p1; \
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
})
#define vst1_lane_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x8_t __s1 = __p1; \
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
})
#define vst1_p16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly16x4_t __s1 = __p1; \
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
__builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \
})
#define vst1_p8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly8x8_t __s1 = __p1; \
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \
})
#define vst1_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4_t __s1 = __p1; \
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
__builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \
})
#define vst1_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2_t __s1 = __p1; \
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \
})
#define vst1_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x1_t __s1 = __p1; \
__builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
})
#define vst1_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8_t __s1 = __p1; \
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \
})
#define vst1_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x4_t __s1 = __p1; \
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
__builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \
})
#define vst1_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2_t __s1 = __p1; \
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \
})
#define vst1_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x1_t __s1 = __p1; \
__builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
})
#define vst1_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x8_t __s1 = __p1; \
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \
})
#define vst1q_f16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float16x8_t __s1 = __p1; \
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \
})
#define vst1q_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x4_t __s1 = __p1; \
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
__builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \
})
#define vst1q_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x8_t __s1 = __p1; \
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
})
#define vst1q_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x4_t __s1 = __p1; \
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
__builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
})
#define vst1q_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x8_t __s1 = __p1; \
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
})
#define vst1q_lane_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x16_t __s1 = __p1; \
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
})
#define vst1q_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8_t __s1 = __p1; \
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
})
#define vst1q_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4_t __s1 = __p1; \
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
__builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
})
#define vst1q_lane_s64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int64x2_t __s1 = __p1; \
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
})
#define vst1q_lane_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x16_t __s1 = __p1; \
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
})
#define vst1q_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x8_t __s1 = __p1; \
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
})
#define vst1q_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x4_t __s1 = __p1; \
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
__builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
})
#define vst1q_lane_u64 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint64x2_t __s1 = __p1; \
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
})
#define vst1q_lane_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x16_t __s1 = __p1; \
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
})
#define vst1q_p16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly16x8_t __s1 = __p1; \
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \
})
#define vst1q_p8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly8x16_t __s1 = __p1; \
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \
})
#define vst1q_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8_t __s1 = __p1; \
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \
})
#define vst1q_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4_t __s1 = __p1; \
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
__builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \
})
#define vst1q_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x2_t __s1 = __p1; \
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \
})
#define vst1q_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x16_t __s1 = __p1; \
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \
})
#define vst1q_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8_t __s1 = __p1; \
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \
})
#define vst1q_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4_t __s1 = __p1; \
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
__builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \
})
#define vst1q_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x2_t __s1 = __p1; \
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \
})
#define vst1q_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x16_t __s1 = __p1; \
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \
})
#define vst2_f16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float16x4x2_t __s1 = __p1; \
float16x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
})
float16x4_t val[2]
Definition: arm_neon.h:146
struct float16x4x2_t float16x4x2_t
#define vst2_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x2x2_t __s1 = __p1; \
float32x2x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
})
struct float32x2x2_t float32x2x2_t
float32x2_t val[2]
Definition: arm_neon.h:154
#define vst2_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x4x2_t __s1 = __p1; \
float16x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
})
float16x4_t val[2]
Definition: arm_neon.h:146
struct float16x4x2_t float16x4x2_t
#define vst2_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x2x2_t __s1 = __p1; \
float32x2x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
})
struct float32x2x2_t float32x2x2_t
float32x2_t val[2]
Definition: arm_neon.h:154
#define vst2_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x4x2_t __s1 = __p1; \
poly16x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
})
struct poly16x4x2_t poly16x4x2_t
poly16x4_t val[2]
Definition: arm_neon.h:180
#define vst2_lane_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x8x2_t __s1 = __p1; \
poly8x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
})
poly8x8_t val[2]
Definition: arm_neon.h:172
struct poly8x8x2_t poly8x8x2_t
#define vst2_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4x2_t __s1 = __p1; \
int16x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
})
struct int16x4x2_t int16x4x2_t
int16x4_t val[2]
Definition: arm_neon.h:90
#define vst2_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2x2_t __s1 = __p1; \
int32x2x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
})
int32x2_t val[2]
Definition: arm_neon.h:98
struct int32x2x2_t int32x2x2_t
#define vst2_lane_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x8x2_t __s1 = __p1; \
int8x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
})
int8x8_t val[2]
Definition: arm_neon.h:82
struct int8x8x2_t int8x8x2_t
#define vst2_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4x2_t __s1 = __p1; \
uint16x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
})
uint16x4_t val[2]
Definition: arm_neon.h:122
struct uint16x4x2_t uint16x4x2_t
#define vst2_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2x2_t __s1 = __p1; \
uint32x2x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
})
struct uint32x2x2_t uint32x2x2_t
uint32x2_t val[2]
Definition: arm_neon.h:130
#define vst2_lane_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x8x2_t __s1 = __p1; \
uint8x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
})
struct uint8x8x2_t uint8x8x2_t
uint8x8_t val[2]
Definition: arm_neon.h:114
#define vst2_p16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly16x4x2_t __s1 = __p1; \
poly16x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
})
struct poly16x4x2_t poly16x4x2_t
poly16x4_t val[2]
Definition: arm_neon.h:180
#define vst2_p8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly8x8x2_t __s1 = __p1; \
poly8x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
})
poly8x8_t val[2]
Definition: arm_neon.h:172
struct poly8x8x2_t poly8x8x2_t
#define vst2_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4x2_t __s1 = __p1; \
int16x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
})
struct int16x4x2_t int16x4x2_t
int16x4_t val[2]
Definition: arm_neon.h:90
#define vst2_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2x2_t __s1 = __p1; \
int32x2x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
})
int32x2_t val[2]
Definition: arm_neon.h:98
struct int32x2x2_t int32x2x2_t
#define vst2_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x1x2_t __s1 = __p1; \
__builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
})
struct int64x1x2_t int64x1x2_t
#define vst2_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8x2_t __s1 = __p1; \
int8x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
})
int8x8_t val[2]
Definition: arm_neon.h:82
struct int8x8x2_t int8x8x2_t
#define vst2_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x4x2_t __s1 = __p1; \
uint16x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
})
uint16x4_t val[2]
Definition: arm_neon.h:122
struct uint16x4x2_t uint16x4x2_t
#define vst2_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2x2_t __s1 = __p1; \
uint32x2x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
})
struct uint32x2x2_t uint32x2x2_t
uint32x2_t val[2]
Definition: arm_neon.h:130
#define vst2_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x1x2_t __s1 = __p1; \
__builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
})
struct uint64x1x2_t uint64x1x2_t
#define vst2_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x8x2_t __s1 = __p1; \
uint8x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
})
struct uint8x8x2_t uint8x8x2_t
uint8x8_t val[2]
Definition: arm_neon.h:114
#define vst2q_f16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float16x8x2_t __s1 = __p1; \
float16x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
})
float16x8_t val[2]
Definition: arm_neon.h:150
struct float16x8x2_t float16x8x2_t
#define vst2q_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x4x2_t __s1 = __p1; \
float32x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
})
struct float32x4x2_t float32x4x2_t
float32x4_t val[2]
Definition: arm_neon.h:158
#define vst2q_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x8x2_t __s1 = __p1; \
float16x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
})
float16x8_t val[2]
Definition: arm_neon.h:150
struct float16x8x2_t float16x8x2_t
#define vst2q_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x4x2_t __s1 = __p1; \
float32x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
})
struct float32x4x2_t float32x4x2_t
float32x4_t val[2]
Definition: arm_neon.h:158
#define vst2q_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x8x2_t __s1 = __p1; \
poly16x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
})
struct poly16x8x2_t poly16x8x2_t
poly16x8_t val[2]
Definition: arm_neon.h:184
#define vst2q_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8x2_t __s1 = __p1; \
int16x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
})
struct int16x8x2_t int16x8x2_t
int16x8_t val[2]
Definition: arm_neon.h:94
#define vst2q_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4x2_t __s1 = __p1; \
int32x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
})
struct int32x4x2_t int32x4x2_t
int32x4_t val[2]
Definition: arm_neon.h:102
#define vst2q_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x8x2_t __s1 = __p1; \
uint16x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
})
struct uint16x8x2_t uint16x8x2_t
uint16x8_t val[2]
Definition: arm_neon.h:126
#define vst2q_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x4x2_t __s1 = __p1; \
uint32x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
})
uint32x4_t val[2]
Definition: arm_neon.h:134
struct uint32x4x2_t uint32x4x2_t
#define vst2q_p16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly16x8x2_t __s1 = __p1; \
poly16x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
})
struct poly16x8x2_t poly16x8x2_t
poly16x8_t val[2]
Definition: arm_neon.h:184
#define vst2q_p8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly8x16x2_t __s1 = __p1; \
poly8x16x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
})
struct poly8x16x2_t poly8x16x2_t
poly8x16_t val[2]
Definition: arm_neon.h:176
#define vst2q_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8x2_t __s1 = __p1; \
int16x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
})
struct int16x8x2_t int16x8x2_t
int16x8_t val[2]
Definition: arm_neon.h:94
#define vst2q_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4x2_t __s1 = __p1; \
int32x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
})
struct int32x4x2_t int32x4x2_t
int32x4_t val[2]
Definition: arm_neon.h:102
#define vst2q_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x16x2_t __s1 = __p1; \
int8x16x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
})
int8x16_t val[2]
Definition: arm_neon.h:86
struct int8x16x2_t int8x16x2_t
#define vst2q_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8x2_t __s1 = __p1; \
uint16x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
})
struct uint16x8x2_t uint16x8x2_t
uint16x8_t val[2]
Definition: arm_neon.h:126
#define vst2q_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4x2_t __s1 = __p1; \
uint32x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
})
uint32x4_t val[2]
Definition: arm_neon.h:134
struct uint32x4x2_t uint32x4x2_t
#define vst2q_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x16x2_t __s1 = __p1; \
uint8x16x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
})
uint8x16_t val[2]
Definition: arm_neon.h:118
struct uint8x16x2_t uint8x16x2_t
#define vst3_f16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float16x4x3_t __s1 = __p1; \
float16x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
})
float16x4_t val[3]
Definition: arm_neon.h:262
struct float16x4x3_t float16x4x3_t
#define vst3_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x2x3_t __s1 = __p1; \
float32x2x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
})
struct float32x2x3_t float32x2x3_t
float32x2_t val[3]
Definition: arm_neon.h:270
#define vst3_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x4x3_t __s1 = __p1; \
float16x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
})
float16x4_t val[3]
Definition: arm_neon.h:262
struct float16x4x3_t float16x4x3_t
#define vst3_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x2x3_t __s1 = __p1; \
float32x2x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
})
struct float32x2x3_t float32x2x3_t
float32x2_t val[3]
Definition: arm_neon.h:270
#define vst3_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x4x3_t __s1 = __p1; \
poly16x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
})
struct poly16x4x3_t poly16x4x3_t
poly16x4_t val[3]
Definition: arm_neon.h:296
#define vst3_lane_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x8x3_t __s1 = __p1; \
poly8x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
})
struct poly8x8x3_t poly8x8x3_t
poly8x8_t val[3]
Definition: arm_neon.h:288
#define vst3_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4x3_t __s1 = __p1; \
int16x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
})
struct int16x4x3_t int16x4x3_t
int16x4_t val[3]
Definition: arm_neon.h:206
#define vst3_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2x3_t __s1 = __p1; \
int32x2x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
})
struct int32x2x3_t int32x2x3_t
int32x2_t val[3]
Definition: arm_neon.h:214
#define vst3_lane_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x8x3_t __s1 = __p1; \
int8x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
})
int8x8_t val[3]
Definition: arm_neon.h:198
struct int8x8x3_t int8x8x3_t
#define vst3_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4x3_t __s1 = __p1; \
uint16x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
})
uint16x4_t val[3]
Definition: arm_neon.h:238
struct uint16x4x3_t uint16x4x3_t
#define vst3_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2x3_t __s1 = __p1; \
uint32x2x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
})
uint32x2_t val[3]
Definition: arm_neon.h:246
struct uint32x2x3_t uint32x2x3_t
#define vst3_lane_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x8x3_t __s1 = __p1; \
uint8x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
})
struct uint8x8x3_t uint8x8x3_t
uint8x8_t val[3]
Definition: arm_neon.h:230
#define vst3_p16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly16x4x3_t __s1 = __p1; \
poly16x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
})
struct poly16x4x3_t poly16x4x3_t
poly16x4_t val[3]
Definition: arm_neon.h:296
#define vst3_p8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly8x8x3_t __s1 = __p1; \
poly8x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
})
struct poly8x8x3_t poly8x8x3_t
poly8x8_t val[3]
Definition: arm_neon.h:288
#define vst3_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4x3_t __s1 = __p1; \
int16x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
})
struct int16x4x3_t int16x4x3_t
int16x4_t val[3]
Definition: arm_neon.h:206
#define vst3_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2x3_t __s1 = __p1; \
int32x2x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
})
struct int32x2x3_t int32x2x3_t
int32x2_t val[3]
Definition: arm_neon.h:214
#define vst3_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x1x3_t __s1 = __p1; \
__builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
})
struct int64x1x3_t int64x1x3_t
#define vst3_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8x3_t __s1 = __p1; \
int8x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
})
int8x8_t val[3]
Definition: arm_neon.h:198
struct int8x8x3_t int8x8x3_t
#define vst3_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x4x3_t __s1 = __p1; \
uint16x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
})
uint16x4_t val[3]
Definition: arm_neon.h:238
struct uint16x4x3_t uint16x4x3_t
#define vst3_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2x3_t __s1 = __p1; \
uint32x2x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
})
uint32x2_t val[3]
Definition: arm_neon.h:246
struct uint32x2x3_t uint32x2x3_t
#define vst3_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x1x3_t __s1 = __p1; \
__builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
})
struct uint64x1x3_t uint64x1x3_t
#define vst3_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x8x3_t __s1 = __p1; \
uint8x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
})
struct uint8x8x3_t uint8x8x3_t
uint8x8_t val[3]
Definition: arm_neon.h:230
#define vst3q_f16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float16x8x3_t __s1 = __p1; \
float16x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
})
float16x8_t val[3]
Definition: arm_neon.h:266
struct float16x8x3_t float16x8x3_t
#define vst3q_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x4x3_t __s1 = __p1; \
float32x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
})
struct float32x4x3_t float32x4x3_t
float32x4_t val[3]
Definition: arm_neon.h:274
#define vst3q_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x8x3_t __s1 = __p1; \
float16x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
})
float16x8_t val[3]
Definition: arm_neon.h:266
struct float16x8x3_t float16x8x3_t
#define vst3q_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x4x3_t __s1 = __p1; \
float32x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
})
struct float32x4x3_t float32x4x3_t
float32x4_t val[3]
Definition: arm_neon.h:274
#define vst3q_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x8x3_t __s1 = __p1; \
poly16x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
})
struct poly16x8x3_t poly16x8x3_t
poly16x8_t val[3]
Definition: arm_neon.h:300
#define vst3q_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8x3_t __s1 = __p1; \
int16x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
})
struct int16x8x3_t int16x8x3_t
int16x8_t val[3]
Definition: arm_neon.h:210
#define vst3q_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4x3_t __s1 = __p1; \
int32x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
})
int32x4_t val[3]
Definition: arm_neon.h:218
struct int32x4x3_t int32x4x3_t
#define vst3q_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x8x3_t __s1 = __p1; \
uint16x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
})
uint16x8_t val[3]
Definition: arm_neon.h:242
struct uint16x8x3_t uint16x8x3_t
#define vst3q_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x4x3_t __s1 = __p1; \
uint32x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
})
struct uint32x4x3_t uint32x4x3_t
uint32x4_t val[3]
Definition: arm_neon.h:250
#define vst3q_p16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly16x8x3_t __s1 = __p1; \
poly16x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
})
struct poly16x8x3_t poly16x8x3_t
poly16x8_t val[3]
Definition: arm_neon.h:300
#define vst3q_p8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly8x16x3_t __s1 = __p1; \
poly8x16x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
})
poly8x16_t val[3]
Definition: arm_neon.h:292
struct poly8x16x3_t poly8x16x3_t
#define vst3q_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8x3_t __s1 = __p1; \
int16x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
})
struct int16x8x3_t int16x8x3_t
int16x8_t val[3]
Definition: arm_neon.h:210
#define vst3q_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4x3_t __s1 = __p1; \
int32x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
})
int32x4_t val[3]
Definition: arm_neon.h:218
struct int32x4x3_t int32x4x3_t
#define vst3q_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x16x3_t __s1 = __p1; \
int8x16x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
})
int8x16_t val[3]
Definition: arm_neon.h:202
struct int8x16x3_t int8x16x3_t
#define vst3q_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8x3_t __s1 = __p1; \
uint16x8x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
})
uint16x8_t val[3]
Definition: arm_neon.h:242
struct uint16x8x3_t uint16x8x3_t
#define vst3q_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4x3_t __s1 = __p1; \
uint32x4x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
})
struct uint32x4x3_t uint32x4x3_t
uint32x4_t val[3]
Definition: arm_neon.h:250
#define vst3q_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x16x3_t __s1 = __p1; \
uint8x16x3_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
})
struct uint8x16x3_t uint8x16x3_t
uint8x16_t val[3]
Definition: arm_neon.h:234
#define vst4_f16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float16x4x4_t __s1 = __p1; \
float16x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
})
float16x4_t val[4]
Definition: arm_neon.h:378
struct float16x4x4_t float16x4x4_t
#define vst4_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x2x4_t __s1 = __p1; \
float32x2x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
__builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
})
struct float32x2x4_t float32x2x4_t
float32x2_t val[4]
Definition: arm_neon.h:386
#define vst4_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x4x4_t __s1 = __p1; \
float16x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
})
float16x4_t val[4]
Definition: arm_neon.h:378
struct float16x4x4_t float16x4x4_t
#define vst4_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x2x4_t __s1 = __p1; \
float32x2x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
__builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
})
struct float32x2x4_t float32x2x4_t
float32x2_t val[4]
Definition: arm_neon.h:386
#define vst4_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x4x4_t __s1 = __p1; \
poly16x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
})
struct poly16x4x4_t poly16x4x4_t
poly16x4_t val[4]
Definition: arm_neon.h:412
#define vst4_lane_p8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly8x8x4_t __s1 = __p1; \
poly8x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
})
struct poly8x8x4_t poly8x8x4_t
poly8x8_t val[4]
Definition: arm_neon.h:404
#define vst4_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x4x4_t __s1 = __p1; \
int16x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
})
struct int16x4x4_t int16x4x4_t
int16x4_t val[4]
Definition: arm_neon.h:322
#define vst4_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x2x4_t __s1 = __p1; \
int32x2x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
__builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
})
struct int32x2x4_t int32x2x4_t
int32x2_t val[4]
Definition: arm_neon.h:330
#define vst4_lane_s8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int8x8x4_t __s1 = __p1; \
int8x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
})
int8x8_t val[4]
Definition: arm_neon.h:314
struct int8x8x4_t int8x8x4_t
#define vst4_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x4x4_t __s1 = __p1; \
uint16x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
})
struct uint16x4x4_t uint16x4x4_t
uint16x4_t val[4]
Definition: arm_neon.h:354
#define vst4_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x2x4_t __s1 = __p1; \
uint32x2x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
__builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
})
struct uint32x2x4_t uint32x2x4_t
uint32x2_t val[4]
Definition: arm_neon.h:362
#define vst4_lane_u8 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint8x8x4_t __s1 = __p1; \
uint8x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
})
uint8x8_t val[4]
Definition: arm_neon.h:346
struct uint8x8x4_t uint8x8x4_t
#define vst4_p16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly16x4x4_t __s1 = __p1; \
poly16x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
})
struct poly16x4x4_t poly16x4x4_t
poly16x4_t val[4]
Definition: arm_neon.h:412
#define vst4_p8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly8x8x4_t __s1 = __p1; \
poly8x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
})
struct poly8x8x4_t poly8x8x4_t
poly8x8_t val[4]
Definition: arm_neon.h:404
#define vst4_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x4x4_t __s1 = __p1; \
int16x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
})
struct int16x4x4_t int16x4x4_t
int16x4_t val[4]
Definition: arm_neon.h:322
#define vst4_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x2x4_t __s1 = __p1; \
int32x2x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
__builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
})
struct int32x2x4_t int32x2x4_t
int32x2_t val[4]
Definition: arm_neon.h:330
#define vst4_s64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int64x1x4_t __s1 = __p1; \
__builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
})
struct int64x1x4_t int64x1x4_t
#define vst4_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x8x4_t __s1 = __p1; \
int8x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
})
int8x8_t val[4]
Definition: arm_neon.h:314
struct int8x8x4_t int8x8x4_t
#define vst4_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x4x4_t __s1 = __p1; \
uint16x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
})
struct uint16x4x4_t uint16x4x4_t
uint16x4_t val[4]
Definition: arm_neon.h:354
#define vst4_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x2x4_t __s1 = __p1; \
uint32x2x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
__builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
})
struct uint32x2x4_t uint32x2x4_t
uint32x2_t val[4]
Definition: arm_neon.h:362
#define vst4_u64 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint64x1x4_t __s1 = __p1; \
__builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
})
struct uint64x1x4_t uint64x1x4_t
#define vst4_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x8x4_t __s1 = __p1; \
uint8x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
})
uint8x8_t val[4]
Definition: arm_neon.h:346
struct uint8x8x4_t uint8x8x4_t
#define vst4q_f16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float16x8x4_t __s1 = __p1; \
float16x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
})
float16x8_t val[4]
Definition: arm_neon.h:382
struct float16x8x4_t float16x8x4_t
#define vst4q_f32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
float32x4x4_t __s1 = __p1; \
float32x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
})
float32x4_t val[4]
Definition: arm_neon.h:390
struct float32x4x4_t float32x4x4_t
#define vst4q_lane_f16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float16x8x4_t __s1 = __p1; \
float16x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
})
float16x8_t val[4]
Definition: arm_neon.h:382
struct float16x8x4_t float16x8x4_t
#define vst4q_lane_f32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
float32x4x4_t __s1 = __p1; \
float32x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
})
float32x4_t val[4]
Definition: arm_neon.h:390
struct float32x4x4_t float32x4x4_t
#define vst4q_lane_p16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
poly16x8x4_t __s1 = __p1; \
poly16x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
})
poly16x8_t val[4]
Definition: arm_neon.h:416
struct poly16x8x4_t poly16x8x4_t
#define vst4q_lane_s16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int16x8x4_t __s1 = __p1; \
int16x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
})
struct int16x8x4_t int16x8x4_t
int16x8_t val[4]
Definition: arm_neon.h:326
#define vst4q_lane_s32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
int32x4x4_t __s1 = __p1; \
int32x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
})
struct int32x4x4_t int32x4x4_t
int32x4_t val[4]
Definition: arm_neon.h:334
#define vst4q_lane_u16 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint16x8x4_t __s1 = __p1; \
uint16x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
})
uint16x8_t val[4]
Definition: arm_neon.h:358
struct uint16x8x4_t uint16x8x4_t
#define vst4q_lane_u32 (   __p0,
  __p1,
  __p2 
)
Value:
__extension__ ({ \
uint32x4x4_t __s1 = __p1; \
uint32x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
})
uint32x4_t val[4]
Definition: arm_neon.h:366
struct uint32x4x4_t uint32x4x4_t
#define vst4q_p16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly16x8x4_t __s1 = __p1; \
poly16x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
})
poly16x8_t val[4]
Definition: arm_neon.h:416
struct poly16x8x4_t poly16x8x4_t
#define vst4q_p8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
poly8x16x4_t __s1 = __p1; \
poly8x16x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
})
poly8x16_t val[4]
Definition: arm_neon.h:408
struct poly8x16x4_t poly8x16x4_t
#define vst4q_s16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int16x8x4_t __s1 = __p1; \
int16x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
})
struct int16x8x4_t int16x8x4_t
int16x8_t val[4]
Definition: arm_neon.h:326
#define vst4q_s32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int32x4x4_t __s1 = __p1; \
int32x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
})
struct int32x4x4_t int32x4x4_t
int32x4_t val[4]
Definition: arm_neon.h:334
#define vst4q_s8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
int8x16x4_t __s1 = __p1; \
int8x16x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
})
int8x16_t val[4]
Definition: arm_neon.h:318
struct int8x16x4_t int8x16x4_t
#define vst4q_u16 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint16x8x4_t __s1 = __p1; \
uint16x8x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
})
uint16x8_t val[4]
Definition: arm_neon.h:358
struct uint16x8x4_t uint16x8x4_t
#define vst4q_u32 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint32x4x4_t __s1 = __p1; \
uint32x4x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
__builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
})
uint32x4_t val[4]
Definition: arm_neon.h:366
struct uint32x4x4_t uint32x4x4_t
#define vst4q_u8 (   __p0,
  __p1 
)
Value:
__extension__ ({ \
uint8x16x4_t __s1 = __p1; \
uint8x16x4_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
})
struct uint8x16x4_t uint8x16x4_t
uint8x16_t val[4]
Definition: arm_neon.h:350

Typedef Documentation

typedef __fp16 float16_t
typedef struct float16x4x2_t float16x4x2_t
typedef struct float16x4x3_t float16x4x3_t
typedef struct float16x4x4_t float16x4x4_t
typedef struct float16x8x2_t float16x8x2_t
typedef struct float16x8x3_t float16x8x3_t
typedef struct float16x8x4_t float16x8x4_t
typedef float float32_t
typedef struct float32x2x2_t float32x2x2_t
typedef struct float32x2x3_t float32x2x3_t
typedef struct float32x2x4_t float32x2x4_t
typedef struct float32x4x2_t float32x4x2_t
typedef struct float32x4x3_t float32x4x3_t
typedef struct float32x4x4_t float32x4x4_t
typedef struct int16x4x2_t int16x4x2_t
typedef struct int16x4x3_t int16x4x3_t
typedef struct int16x4x4_t int16x4x4_t
typedef struct int16x8x2_t int16x8x2_t
typedef struct int16x8x3_t int16x8x3_t
typedef struct int16x8x4_t int16x8x4_t
typedef struct int32x2x2_t int32x2x2_t
typedef struct int32x2x3_t int32x2x3_t
typedef struct int32x2x4_t int32x2x4_t
typedef struct int32x4x2_t int32x4x2_t
typedef struct int32x4x3_t int32x4x3_t
typedef struct int32x4x4_t int32x4x4_t
typedef struct int64x1x2_t int64x1x2_t
typedef struct int64x1x3_t int64x1x3_t
typedef struct int64x1x4_t int64x1x4_t
typedef struct int64x2x2_t int64x2x2_t
typedef struct int64x2x3_t int64x2x3_t
typedef struct int64x2x4_t int64x2x4_t
typedef struct int8x16x2_t int8x16x2_t
typedef struct int8x16x3_t int8x16x3_t
typedef struct int8x16x4_t int8x16x4_t
typedef struct int8x8x2_t int8x8x2_t
typedef struct int8x8x3_t int8x8x3_t
typedef struct int8x8x4_t int8x8x4_t
typedef int16_t poly16_t
typedef struct poly16x4x2_t poly16x4x2_t
typedef struct poly16x4x3_t poly16x4x3_t
typedef struct poly16x4x4_t poly16x4x4_t
typedef struct poly16x8x2_t poly16x8x2_t
typedef struct poly16x8x3_t poly16x8x3_t
typedef struct poly16x8x4_t poly16x8x4_t
typedef int8_t poly8_t
typedef struct poly8x16x2_t poly8x16x2_t
typedef struct poly8x16x3_t poly8x16x3_t
typedef struct poly8x16x4_t poly8x16x4_t
typedef struct poly8x8x2_t poly8x8x2_t
typedef struct poly8x8x3_t poly8x8x3_t
typedef struct poly8x8x4_t poly8x8x4_t
typedef struct uint16x4x2_t uint16x4x2_t
typedef struct uint16x4x3_t uint16x4x3_t
typedef struct uint16x4x4_t uint16x4x4_t
typedef struct uint16x8x2_t uint16x8x2_t
typedef struct uint16x8x3_t uint16x8x3_t
typedef struct uint16x8x4_t uint16x8x4_t
typedef struct uint32x2x2_t uint32x2x2_t
typedef struct uint32x2x3_t uint32x2x3_t
typedef struct uint32x2x4_t uint32x2x4_t
typedef struct uint32x4x2_t uint32x4x2_t
typedef struct uint32x4x3_t uint32x4x3_t
typedef struct uint32x4x4_t uint32x4x4_t
typedef struct uint64x1x2_t uint64x1x2_t
typedef struct uint64x1x3_t uint64x1x3_t
typedef struct uint64x1x4_t uint64x1x4_t
typedef struct uint64x2x2_t uint64x2x2_t
typedef struct uint64x2x3_t uint64x2x3_t
typedef struct uint64x2x4_t uint64x2x4_t
typedef struct uint8x16x2_t uint8x16x2_t
typedef struct uint8x16x3_t uint8x16x3_t
typedef struct uint8x16x4_t uint8x16x4_t
typedef struct uint8x8x2_t uint8x8x2_t
typedef struct uint8x8x3_t uint8x8x3_t
typedef struct uint8x8x4_t uint8x8x4_t

Function Documentation

typedef __attribute__ ( (neon_vector_type(8))  )
typedef __attribute__ ( (neon_vector_type(16))  )
typedef __attribute__ ( (neon_vector_type(4))  )
typedef __attribute__ ( (neon_vector_type(2))  )
typedef __attribute__ ( (neon_vector_type(1))  )
typedef __attribute__ ( (neon_polyvector_type(8))  )
typedef __attribute__ ( (neon_polyvector_type(16))  )
typedef __attribute__ ( (neon_polyvector_type(4))  )
__ai int32x4_t __noswap_vabal_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16x4_t  __p2 
)
__ai int64x2_t __noswap_vabal_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32x2_t  __p2 
)
__ai int16x8_t __noswap_vabal_s8 ( int16x8_t  __p0,
int8x8_t  __p1,
int8x8_t  __p2 
)
__ai uint32x4_t __noswap_vabal_u16 ( uint32x4_t  __p0,
uint16x4_t  __p1,
uint16x4_t  __p2 
)
__ai uint64x2_t __noswap_vabal_u32 ( uint64x2_t  __p0,
uint32x2_t  __p1,
uint32x2_t  __p2 
)
__ai uint16x8_t __noswap_vabal_u8 ( uint16x8_t  __p0,
uint8x8_t  __p1,
uint8x8_t  __p2 
)
__ai int16x4_t __noswap_vabd_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t __noswap_vabd_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8_t __noswap_vabd_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t __noswap_vabd_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t __noswap_vabd_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t __noswap_vabd_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int32x4_t __noswap_vabdl_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int64x2_t __noswap_vabdl_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int16x8_t __noswap_vabdl_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint32x4_t __noswap_vabdl_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint64x2_t __noswap_vabdl_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint16x8_t __noswap_vabdl_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int16x8_t __noswap_vabdq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t __noswap_vabdq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int8x16_t __noswap_vabdq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t __noswap_vabdq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t __noswap_vabdq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16_t __noswap_vabdq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai int8x8_t __noswap_vaddhn_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int16x4_t __noswap_vaddhn_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int32x2_t __noswap_vaddhn_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai uint8x8_t __noswap_vaddhn_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint16x4_t __noswap_vaddhn_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint32x2_t __noswap_vaddhn_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai float16x8_t __noswap_vcombine_f16 ( float16x4_t  __p0,
float16x4_t  __p1 
)
__ai float32x4_t __noswap_vcombine_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai int16x8_t __noswap_vcombine_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x4_t __noswap_vcombine_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x16_t __noswap_vcombine_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x8_t __noswap_vcombine_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x4_t __noswap_vcombine_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x16_t __noswap_vcombine_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai float16x4_t __noswap_vcvt_f16_f32 ( float32x4_t  __p0)
__ai float32x4_t __noswap_vcvt_f32_f16 ( float16x4_t  __p0)
__ai float32x2_t __noswap_vfma_f32 ( float32x2_t  __p0,
float32x2_t  __p1,
float32x2_t  __p2 
)
__ai float32x4_t __noswap_vfmaq_f32 ( float32x4_t  __p0,
float32x4_t  __p1,
float32x4_t  __p2 
)
__ai float16x4_t __noswap_vget_high_f16 ( float16x8_t  __p0)
__ai float32x2_t __noswap_vget_high_f32 ( float32x4_t  __p0)
__ai poly8x8_t __noswap_vget_high_p8 ( poly8x16_t  __p0)
__ai int16x4_t __noswap_vget_high_s16 ( int16x8_t  __p0)
__ai int32x2_t __noswap_vget_high_s32 ( int32x4_t  __p0)
__ai int8x8_t __noswap_vget_high_s8 ( int8x16_t  __p0)
__ai uint16x4_t __noswap_vget_high_u16 ( uint16x8_t  __p0)
__ai uint32x2_t __noswap_vget_high_u32 ( uint32x4_t  __p0)
__ai uint8x8_t __noswap_vget_high_u8 ( uint8x16_t  __p0)
__ai int32x4_t __noswap_vmlal_n_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16_t  __p2 
)
__ai int64x2_t __noswap_vmlal_n_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32_t  __p2 
)
__ai uint32x4_t __noswap_vmlal_n_u16 ( uint32x4_t  __p0,
uint16x4_t  __p1,
uint16_t  __p2 
)
__ai uint64x2_t __noswap_vmlal_n_u32 ( uint64x2_t  __p0,
uint32x2_t  __p1,
uint32_t  __p2 
)
__ai int32x4_t __noswap_vmlal_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16x4_t  __p2 
)
__ai int64x2_t __noswap_vmlal_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32x2_t  __p2 
)
__ai int16x8_t __noswap_vmlal_s8 ( int16x8_t  __p0,
int8x8_t  __p1,
int8x8_t  __p2 
)
__ai uint32x4_t __noswap_vmlal_u16 ( uint32x4_t  __p0,
uint16x4_t  __p1,
uint16x4_t  __p2 
)
__ai uint64x2_t __noswap_vmlal_u32 ( uint64x2_t  __p0,
uint32x2_t  __p1,
uint32x2_t  __p2 
)
__ai uint16x8_t __noswap_vmlal_u8 ( uint16x8_t  __p0,
uint8x8_t  __p1,
uint8x8_t  __p2 
)
__ai int32x4_t __noswap_vmlsl_n_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16_t  __p2 
)
__ai int64x2_t __noswap_vmlsl_n_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32_t  __p2 
)
__ai uint32x4_t __noswap_vmlsl_n_u16 ( uint32x4_t  __p0,
uint16x4_t  __p1,
uint16_t  __p2 
)
__ai uint64x2_t __noswap_vmlsl_n_u32 ( uint64x2_t  __p0,
uint32x2_t  __p1,
uint32_t  __p2 
)
__ai int32x4_t __noswap_vmlsl_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16x4_t  __p2 
)
__ai int64x2_t __noswap_vmlsl_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32x2_t  __p2 
)
__ai int16x8_t __noswap_vmlsl_s8 ( int16x8_t  __p0,
int8x8_t  __p1,
int8x8_t  __p2 
)
__ai uint32x4_t __noswap_vmlsl_u16 ( uint32x4_t  __p0,
uint16x4_t  __p1,
uint16x4_t  __p2 
)
__ai uint64x2_t __noswap_vmlsl_u32 ( uint64x2_t  __p0,
uint32x2_t  __p1,
uint32x2_t  __p2 
)
__ai uint16x8_t __noswap_vmlsl_u8 ( uint16x8_t  __p0,
uint8x8_t  __p1,
uint8x8_t  __p2 
)
__ai int32x4_t __noswap_vmovl_s16 ( int16x4_t  __p0)
__ai int64x2_t __noswap_vmovl_s32 ( int32x2_t  __p0)
__ai int16x8_t __noswap_vmovl_s8 ( int8x8_t  __p0)
__ai uint32x4_t __noswap_vmovl_u16 ( uint16x4_t  __p0)
__ai uint64x2_t __noswap_vmovl_u32 ( uint32x2_t  __p0)
__ai uint16x8_t __noswap_vmovl_u8 ( uint8x8_t  __p0)
__ai int8x8_t __noswap_vmovn_s16 ( int16x8_t  __p0)
__ai int16x4_t __noswap_vmovn_s32 ( int32x4_t  __p0)
__ai int32x2_t __noswap_vmovn_s64 ( int64x2_t  __p0)
__ai uint8x8_t __noswap_vmovn_u16 ( uint16x8_t  __p0)
__ai uint16x4_t __noswap_vmovn_u32 ( uint32x4_t  __p0)
__ai uint32x2_t __noswap_vmovn_u64 ( uint64x2_t  __p0)
__ai int32x4_t __noswap_vmull_n_s16 ( int16x4_t  __p0,
int16_t  __p1 
)
__ai int64x2_t __noswap_vmull_n_s32 ( int32x2_t  __p0,
int32_t  __p1 
)
__ai uint32x4_t __noswap_vmull_n_u16 ( uint16x4_t  __p0,
uint16_t  __p1 
)
__ai uint64x2_t __noswap_vmull_n_u32 ( uint32x2_t  __p0,
uint32_t  __p1 
)
__ai poly16x8_t __noswap_vmull_p8 ( poly8x8_t  __p0,
poly8x8_t  __p1 
)
__ai int32x4_t __noswap_vmull_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int64x2_t __noswap_vmull_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int16x8_t __noswap_vmull_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint32x4_t __noswap_vmull_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint64x2_t __noswap_vmull_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint16x8_t __noswap_vmull_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int16x4_t __noswap_vqadd_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t __noswap_vqadd_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int16x8_t __noswap_vqaddq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t __noswap_vqaddq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int32x4_t __noswap_vqdmlal_n_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16_t  __p2 
)
__ai int64x2_t __noswap_vqdmlal_n_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32_t  __p2 
)
__ai int32x4_t __noswap_vqdmlal_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16x4_t  __p2 
)
__ai int64x2_t __noswap_vqdmlal_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32x2_t  __p2 
)
__ai int32x4_t __noswap_vqdmlsl_n_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16_t  __p2 
)
__ai int64x2_t __noswap_vqdmlsl_n_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32_t  __p2 
)
__ai int32x4_t __noswap_vqdmlsl_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16x4_t  __p2 
)
__ai int64x2_t __noswap_vqdmlsl_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32x2_t  __p2 
)
__ai int16x4_t __noswap_vqdmulh_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t __noswap_vqdmulh_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int16x8_t __noswap_vqdmulhq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t __noswap_vqdmulhq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int32x4_t __noswap_vqdmull_n_s16 ( int16x4_t  __p0,
int16_t  __p1 
)
__ai int64x2_t __noswap_vqdmull_n_s32 ( int32x2_t  __p0,
int32_t  __p1 
)
__ai int32x4_t __noswap_vqdmull_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int64x2_t __noswap_vqdmull_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8_t __noswap_vqmovn_s16 ( int16x8_t  __p0)
__ai int16x4_t __noswap_vqmovn_s32 ( int32x4_t  __p0)
__ai int32x2_t __noswap_vqmovn_s64 ( int64x2_t  __p0)
__ai uint8x8_t __noswap_vqmovn_u16 ( uint16x8_t  __p0)
__ai uint16x4_t __noswap_vqmovn_u32 ( uint32x4_t  __p0)
__ai uint32x2_t __noswap_vqmovn_u64 ( uint64x2_t  __p0)
__ai uint8x8_t __noswap_vqmovun_s16 ( int16x8_t  __p0)
__ai uint16x4_t __noswap_vqmovun_s32 ( int32x4_t  __p0)
__ai uint32x2_t __noswap_vqmovun_s64 ( int64x2_t  __p0)
__ai int16x4_t __noswap_vqrdmulh_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t __noswap_vqrdmulh_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int16x8_t __noswap_vqrdmulhq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t __noswap_vqrdmulhq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int16x4_t __noswap_vqsub_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t __noswap_vqsub_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int16x8_t __noswap_vqsubq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t __noswap_vqsubq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int8x8_t __noswap_vraddhn_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int16x4_t __noswap_vraddhn_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int32x2_t __noswap_vraddhn_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai uint8x8_t __noswap_vraddhn_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint16x4_t __noswap_vraddhn_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint32x2_t __noswap_vraddhn_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai int8x8_t __noswap_vrsubhn_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int16x4_t __noswap_vrsubhn_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int32x2_t __noswap_vrsubhn_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai uint8x8_t __noswap_vrsubhn_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint16x4_t __noswap_vrsubhn_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint32x2_t __noswap_vrsubhn_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai int8x8_t __noswap_vsubhn_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int16x4_t __noswap_vsubhn_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int32x2_t __noswap_vsubhn_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai uint8x8_t __noswap_vsubhn_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint16x4_t __noswap_vsubhn_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint32x2_t __noswap_vsubhn_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai int16x4_t vaba_s16 ( int16x4_t  __p0,
int16x4_t  __p1,
int16x4_t  __p2 
)
__ai int32x2_t vaba_s32 ( int32x2_t  __p0,
int32x2_t  __p1,
int32x2_t  __p2 
)
__ai int8x8_t vaba_s8 ( int8x8_t  __p0,
int8x8_t  __p1,
int8x8_t  __p2 
)
__ai uint16x4_t vaba_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1,
uint16x4_t  __p2 
)
__ai uint32x2_t vaba_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1,
uint32x2_t  __p2 
)
__ai uint8x8_t vaba_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1,
uint8x8_t  __p2 
)
__ai int32x4_t vabal_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16x4_t  __p2 
)
__ai int64x2_t vabal_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32x2_t  __p2 
)
__ai int16x8_t vabal_s8 ( int16x8_t  __p0,
int8x8_t  __p1,
int8x8_t  __p2 
)
__ai uint32x4_t vabal_u16 ( uint32x4_t  __p0,
uint16x4_t  __p1,
uint16x4_t  __p2 
)
__ai uint64x2_t vabal_u32 ( uint64x2_t  __p0,
uint32x2_t  __p1,
uint32x2_t  __p2 
)
__ai uint16x8_t vabal_u8 ( uint16x8_t  __p0,
uint8x8_t  __p1,
uint8x8_t  __p2 
)
__ai int16x8_t vabaq_s16 ( int16x8_t  __p0,
int16x8_t  __p1,
int16x8_t  __p2 
)
__ai int32x4_t vabaq_s32 ( int32x4_t  __p0,
int32x4_t  __p1,
int32x4_t  __p2 
)
__ai int8x16_t vabaq_s8 ( int8x16_t  __p0,
int8x16_t  __p1,
int8x16_t  __p2 
)
__ai uint16x8_t vabaq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1,
uint16x8_t  __p2 
)
__ai uint32x4_t vabaq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1,
uint32x4_t  __p2 
)
__ai uint8x16_t vabaq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1,
uint8x16_t  __p2 
)
__ai float32x2_t vabd_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai int16x4_t vabd_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vabd_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8_t vabd_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vabd_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vabd_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vabd_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int32x4_t vabdl_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int64x2_t vabdl_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int16x8_t vabdl_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint32x4_t vabdl_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint64x2_t vabdl_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint16x8_t vabdl_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai float32x4_t vabdq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai int16x8_t vabdq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vabdq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int8x16_t vabdq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vabdq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vabdq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16_t vabdq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai float32x2_t vabs_f32 ( float32x2_t  __p0)
__ai int16x4_t vabs_s16 ( int16x4_t  __p0)
__ai int32x2_t vabs_s32 ( int32x2_t  __p0)
__ai int8x8_t vabs_s8 ( int8x8_t  __p0)
__ai float32x4_t vabsq_f32 ( float32x4_t  __p0)
__ai int16x8_t vabsq_s16 ( int16x8_t  __p0)
__ai int32x4_t vabsq_s32 ( int32x4_t  __p0)
__ai int8x16_t vabsq_s8 ( int8x16_t  __p0)
__ai float32x2_t vadd_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai int16x4_t vadd_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vadd_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int64x1_t vadd_s64 ( int64x1_t  __p0,
int64x1_t  __p1 
)
__ai int8x8_t vadd_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vadd_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vadd_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint64x1_t vadd_u64 ( uint64x1_t  __p0,
uint64x1_t  __p1 
)
__ai uint8x8_t vadd_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int8x8_t vaddhn_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int16x4_t vaddhn_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int32x2_t vaddhn_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai uint8x8_t vaddhn_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint16x4_t vaddhn_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint32x2_t vaddhn_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai int32x4_t vaddl_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int64x2_t vaddl_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int16x8_t vaddl_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint32x4_t vaddl_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint64x2_t vaddl_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint16x8_t vaddl_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai float32x4_t vaddq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai int16x8_t vaddq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vaddq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int64x2_t vaddq_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai int8x16_t vaddq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vaddq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vaddq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint64x2_t vaddq_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai uint8x16_t vaddq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai int32x4_t vaddw_s16 ( int32x4_t  __p0,
int16x4_t  __p1 
)
__ai int64x2_t vaddw_s32 ( int64x2_t  __p0,
int32x2_t  __p1 
)
__ai int16x8_t vaddw_s8 ( int16x8_t  __p0,
int8x8_t  __p1 
)
__ai uint32x4_t vaddw_u16 ( uint32x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint64x2_t vaddw_u32 ( uint64x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint16x8_t vaddw_u8 ( uint16x8_t  __p0,
uint8x8_t  __p1 
)
__ai int16x4_t vand_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vand_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int64x1_t vand_s64 ( int64x1_t  __p0,
int64x1_t  __p1 
)
__ai int8x8_t vand_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vand_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vand_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint64x1_t vand_u64 ( uint64x1_t  __p0,
uint64x1_t  __p1 
)
__ai uint8x8_t vand_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int16x8_t vandq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vandq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int64x2_t vandq_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai int8x16_t vandq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vandq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vandq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint64x2_t vandq_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai uint8x16_t vandq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai int16x4_t vbic_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vbic_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int64x1_t vbic_s64 ( int64x1_t  __p0,
int64x1_t  __p1 
)
__ai int8x8_t vbic_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vbic_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vbic_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint64x1_t vbic_u64 ( uint64x1_t  __p0,
uint64x1_t  __p1 
)
__ai uint8x8_t vbic_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int16x8_t vbicq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vbicq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int64x2_t vbicq_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai int8x16_t vbicq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vbicq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vbicq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint64x2_t vbicq_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai uint8x16_t vbicq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai float32x2_t vbsl_f32 ( uint32x2_t  __p0,
float32x2_t  __p1,
float32x2_t  __p2 
)
__ai poly16x4_t vbsl_p16 ( uint16x4_t  __p0,
poly16x4_t  __p1,
poly16x4_t  __p2 
)
__ai poly8x8_t vbsl_p8 ( uint8x8_t  __p0,
poly8x8_t  __p1,
poly8x8_t  __p2 
)
__ai int16x4_t vbsl_s16 ( uint16x4_t  __p0,
int16x4_t  __p1,
int16x4_t  __p2 
)
__ai int32x2_t vbsl_s32 ( uint32x2_t  __p0,
int32x2_t  __p1,
int32x2_t  __p2 
)
__ai int64x1_t vbsl_s64 ( uint64x1_t  __p0,
int64x1_t  __p1,
int64x1_t  __p2 
)
__ai int8x8_t vbsl_s8 ( uint8x8_t  __p0,
int8x8_t  __p1,
int8x8_t  __p2 
)
__ai uint16x4_t vbsl_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1,
uint16x4_t  __p2 
)
__ai uint32x2_t vbsl_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1,
uint32x2_t  __p2 
)
__ai uint64x1_t vbsl_u64 ( uint64x1_t  __p0,
uint64x1_t  __p1,
uint64x1_t  __p2 
)
__ai uint8x8_t vbsl_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1,
uint8x8_t  __p2 
)
__ai float32x4_t vbslq_f32 ( uint32x4_t  __p0,
float32x4_t  __p1,
float32x4_t  __p2 
)
__ai poly16x8_t vbslq_p16 ( uint16x8_t  __p0,
poly16x8_t  __p1,
poly16x8_t  __p2 
)
__ai poly8x16_t vbslq_p8 ( uint8x16_t  __p0,
poly8x16_t  __p1,
poly8x16_t  __p2 
)
__ai int16x8_t vbslq_s16 ( uint16x8_t  __p0,
int16x8_t  __p1,
int16x8_t  __p2 
)
__ai int32x4_t vbslq_s32 ( uint32x4_t  __p0,
int32x4_t  __p1,
int32x4_t  __p2 
)
__ai int64x2_t vbslq_s64 ( uint64x2_t  __p0,
int64x2_t  __p1,
int64x2_t  __p2 
)
__ai int8x16_t vbslq_s8 ( uint8x16_t  __p0,
int8x16_t  __p1,
int8x16_t  __p2 
)
__ai uint16x8_t vbslq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1,
uint16x8_t  __p2 
)
__ai uint32x4_t vbslq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1,
uint32x4_t  __p2 
)
__ai uint64x2_t vbslq_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1,
uint64x2_t  __p2 
)
__ai uint8x16_t vbslq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1,
uint8x16_t  __p2 
)
__ai uint32x2_t vcage_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai uint32x4_t vcageq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai uint32x2_t vcagt_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai uint32x4_t vcagtq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai uint32x2_t vcale_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai uint32x4_t vcaleq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai uint32x2_t vcalt_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai uint32x4_t vcaltq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai uint32x2_t vceq_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai uint8x8_t vceq_p8 ( poly8x8_t  __p0,
poly8x8_t  __p1 
)
__ai uint16x4_t vceq_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai uint32x2_t vceq_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai uint8x8_t vceq_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vceq_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vceq_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vceq_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai uint32x4_t vceqq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai uint8x16_t vceqq_p8 ( poly8x16_t  __p0,
poly8x16_t  __p1 
)
__ai uint16x8_t vceqq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai uint32x4_t vceqq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai uint8x16_t vceqq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vceqq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vceqq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16_t vceqq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai uint32x2_t vcge_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai uint16x4_t vcge_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai uint32x2_t vcge_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai uint8x8_t vcge_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vcge_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vcge_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vcge_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai uint32x4_t vcgeq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai uint16x8_t vcgeq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai uint32x4_t vcgeq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai uint8x16_t vcgeq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vcgeq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vcgeq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16_t vcgeq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai uint32x2_t vcgt_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai uint16x4_t vcgt_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai uint32x2_t vcgt_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai uint8x8_t vcgt_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vcgt_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vcgt_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vcgt_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai uint32x4_t vcgtq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai uint16x8_t vcgtq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai uint32x4_t vcgtq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai uint8x16_t vcgtq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vcgtq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vcgtq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16_t vcgtq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai uint32x2_t vcle_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai uint16x4_t vcle_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai uint32x2_t vcle_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai uint8x8_t vcle_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vcle_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vcle_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vcle_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai uint32x4_t vcleq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai uint16x8_t vcleq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai uint32x4_t vcleq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai uint8x16_t vcleq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vcleq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vcleq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16_t vcleq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai int16x4_t vcls_s16 ( int16x4_t  __p0)
__ai int32x2_t vcls_s32 ( int32x2_t  __p0)
__ai int8x8_t vcls_s8 ( int8x8_t  __p0)
__ai int16x8_t vclsq_s16 ( int16x8_t  __p0)
__ai int32x4_t vclsq_s32 ( int32x4_t  __p0)
__ai int8x16_t vclsq_s8 ( int8x16_t  __p0)
__ai uint32x2_t vclt_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai uint16x4_t vclt_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai uint32x2_t vclt_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai uint8x8_t vclt_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vclt_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vclt_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vclt_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai uint32x4_t vcltq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai uint16x8_t vcltq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai uint32x4_t vcltq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai uint8x16_t vcltq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vcltq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vcltq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16_t vcltq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai int16x4_t vclz_s16 ( int16x4_t  __p0)
__ai int32x2_t vclz_s32 ( int32x2_t  __p0)
__ai int8x8_t vclz_s8 ( int8x8_t  __p0)
__ai uint16x4_t vclz_u16 ( uint16x4_t  __p0)
__ai uint32x2_t vclz_u32 ( uint32x2_t  __p0)
__ai uint8x8_t vclz_u8 ( uint8x8_t  __p0)
__ai int16x8_t vclzq_s16 ( int16x8_t  __p0)
__ai int32x4_t vclzq_s32 ( int32x4_t  __p0)
__ai int8x16_t vclzq_s8 ( int8x16_t  __p0)
__ai uint16x8_t vclzq_u16 ( uint16x8_t  __p0)
__ai uint32x4_t vclzq_u32 ( uint32x4_t  __p0)
__ai uint8x16_t vclzq_u8 ( uint8x16_t  __p0)
__ai poly8x8_t vcnt_p8 ( poly8x8_t  __p0)
__ai int8x8_t vcnt_s8 ( int8x8_t  __p0)
__ai uint8x8_t vcnt_u8 ( uint8x8_t  __p0)
__ai poly8x16_t vcntq_p8 ( poly8x16_t  __p0)
__ai int8x16_t vcntq_s8 ( int8x16_t  __p0)
__ai uint8x16_t vcntq_u8 ( uint8x16_t  __p0)
__ai float16x8_t vcombine_f16 ( float16x4_t  __p0,
float16x4_t  __p1 
)
__ai float32x4_t vcombine_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai poly16x8_t vcombine_p16 ( poly16x4_t  __p0,
poly16x4_t  __p1 
)
__ai poly8x16_t vcombine_p8 ( poly8x8_t  __p0,
poly8x8_t  __p1 
)
__ai int16x8_t vcombine_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x4_t vcombine_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int64x2_t vcombine_s64 ( int64x1_t  __p0,
int64x1_t  __p1 
)
__ai int8x16_t vcombine_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x8_t vcombine_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x4_t vcombine_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint64x2_t vcombine_u64 ( uint64x1_t  __p0,
uint64x1_t  __p1 
)
__ai uint8x16_t vcombine_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai float16x4_t vcreate_f16 ( uint64_t  __p0)
__ai float32x2_t vcreate_f32 ( uint64_t  __p0)
__ai poly16x4_t vcreate_p16 ( uint64_t  __p0)
__ai poly8x8_t vcreate_p8 ( uint64_t  __p0)
__ai int16x4_t vcreate_s16 ( uint64_t  __p0)
__ai int32x2_t vcreate_s32 ( uint64_t  __p0)
__ai int64x1_t vcreate_s64 ( uint64_t  __p0)
__ai int8x8_t vcreate_s8 ( uint64_t  __p0)
__ai uint16x4_t vcreate_u16 ( uint64_t  __p0)
__ai uint32x2_t vcreate_u32 ( uint64_t  __p0)
__ai uint64x1_t vcreate_u64 ( uint64_t  __p0)
__ai uint8x8_t vcreate_u8 ( uint64_t  __p0)
__ai float16x4_t vcvt_f16_f32 ( float32x4_t  __p0)
__ai float32x4_t vcvt_f32_f16 ( float16x4_t  __p0)
__ai float32x2_t vcvt_f32_s32 ( int32x2_t  __p0)
__ai float32x2_t vcvt_f32_u32 ( uint32x2_t  __p0)
__ai int32x2_t vcvt_s32_f32 ( float32x2_t  __p0)
__ai uint32x2_t vcvt_u32_f32 ( float32x2_t  __p0)
__ai float32x4_t vcvtq_f32_s32 ( int32x4_t  __p0)
__ai float32x4_t vcvtq_f32_u32 ( uint32x4_t  __p0)
__ai int32x4_t vcvtq_s32_f32 ( float32x4_t  __p0)
__ai uint32x4_t vcvtq_u32_f32 ( float32x4_t  __p0)
__ai float32x2_t vdup_n_f32 ( float32_t  __p0)
__ai poly16x4_t vdup_n_p16 ( poly16_t  __p0)
__ai poly8x8_t vdup_n_p8 ( poly8_t  __p0)
__ai int16x4_t vdup_n_s16 ( int16_t  __p0)
__ai int32x2_t vdup_n_s32 ( int32_t  __p0)
__ai int64x1_t vdup_n_s64 ( int64_t  __p0)
__ai int8x8_t vdup_n_s8 ( int8_t  __p0)
__ai uint16x4_t vdup_n_u16 ( uint16_t  __p0)
__ai uint32x2_t vdup_n_u32 ( uint32_t  __p0)
__ai uint64x1_t vdup_n_u64 ( uint64_t  __p0)
__ai uint8x8_t vdup_n_u8 ( uint8_t  __p0)
__ai float32x4_t vdupq_n_f32 ( float32_t  __p0)
__ai poly16x8_t vdupq_n_p16 ( poly16_t  __p0)
__ai poly8x16_t vdupq_n_p8 ( poly8_t  __p0)
__ai int16x8_t vdupq_n_s16 ( int16_t  __p0)
__ai int32x4_t vdupq_n_s32 ( int32_t  __p0)
__ai int64x2_t vdupq_n_s64 ( int64_t  __p0)
__ai int8x16_t vdupq_n_s8 ( int8_t  __p0)
__ai uint16x8_t vdupq_n_u16 ( uint16_t  __p0)
__ai uint32x4_t vdupq_n_u32 ( uint32_t  __p0)
__ai uint64x2_t vdupq_n_u64 ( uint64_t  __p0)
__ai uint8x16_t vdupq_n_u8 ( uint8_t  __p0)
__ai int16x4_t veor_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t veor_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int64x1_t veor_s64 ( int64x1_t  __p0,
int64x1_t  __p1 
)
__ai int8x8_t veor_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t veor_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t veor_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint64x1_t veor_u64 ( uint64x1_t  __p0,
uint64x1_t  __p1 
)
__ai uint8x8_t veor_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int16x8_t veorq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t veorq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int64x2_t veorq_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai int8x16_t veorq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t veorq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t veorq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint64x2_t veorq_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai uint8x16_t veorq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai float32x2_t vfma_f32 ( float32x2_t  __p0,
float32x2_t  __p1,
float32x2_t  __p2 
)
__ai float32x4_t vfmaq_f32 ( float32x4_t  __p0,
float32x4_t  __p1,
float32x4_t  __p2 
)
__ai float16x4_t vget_high_f16 ( float16x8_t  __p0)
__ai float32x2_t vget_high_f32 ( float32x4_t  __p0)
__ai poly16x4_t vget_high_p16 ( poly16x8_t  __p0)
__ai poly8x8_t vget_high_p8 ( poly8x16_t  __p0)
__ai int16x4_t vget_high_s16 ( int16x8_t  __p0)
__ai int32x2_t vget_high_s32 ( int32x4_t  __p0)
__ai int64x1_t vget_high_s64 ( int64x2_t  __p0)
__ai int8x8_t vget_high_s8 ( int8x16_t  __p0)
__ai uint16x4_t vget_high_u16 ( uint16x8_t  __p0)
__ai uint32x2_t vget_high_u32 ( uint32x4_t  __p0)
__ai uint64x1_t vget_high_u64 ( uint64x2_t  __p0)
__ai uint8x8_t vget_high_u8 ( uint8x16_t  __p0)
__ai float16x4_t vget_low_f16 ( float16x8_t  __p0)
__ai float32x2_t vget_low_f32 ( float32x4_t  __p0)
__ai poly16x4_t vget_low_p16 ( poly16x8_t  __p0)
__ai poly8x8_t vget_low_p8 ( poly8x16_t  __p0)
__ai int16x4_t vget_low_s16 ( int16x8_t  __p0)
__ai int32x2_t vget_low_s32 ( int32x4_t  __p0)
__ai int64x1_t vget_low_s64 ( int64x2_t  __p0)
__ai int8x8_t vget_low_s8 ( int8x16_t  __p0)
__ai uint16x4_t vget_low_u16 ( uint16x8_t  __p0)
__ai uint32x2_t vget_low_u32 ( uint32x4_t  __p0)
__ai uint64x1_t vget_low_u64 ( uint64x2_t  __p0)
__ai uint8x8_t vget_low_u8 ( uint8x16_t  __p0)
__ai int16x4_t vhadd_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vhadd_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8_t vhadd_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vhadd_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vhadd_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vhadd_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int16x8_t vhaddq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vhaddq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int8x16_t vhaddq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vhaddq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vhaddq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16_t vhaddq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai int16x4_t vhsub_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vhsub_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8_t vhsub_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vhsub_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vhsub_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vhsub_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int16x8_t vhsubq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vhsubq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int8x16_t vhsubq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vhsubq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vhsubq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16_t vhsubq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai float32x2_t vmax_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai int16x4_t vmax_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vmax_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8_t vmax_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vmax_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vmax_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vmax_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai float32x4_t vmaxq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai int16x8_t vmaxq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vmaxq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int8x16_t vmaxq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vmaxq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vmaxq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16_t vmaxq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai float32x2_t vmin_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai int16x4_t vmin_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vmin_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8_t vmin_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vmin_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vmin_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vmin_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai float32x4_t vminq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai int16x8_t vminq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vminq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int8x16_t vminq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vminq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vminq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16_t vminq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai float32x2_t vmla_f32 ( float32x2_t  __p0,
float32x2_t  __p1,
float32x2_t  __p2 
)
__ai float32x2_t vmla_n_f32 ( float32x2_t  __p0,
float32x2_t  __p1,
float32_t  __p2 
)
__ai int16x4_t vmla_n_s16 ( int16x4_t  __p0,
int16x4_t  __p1,
int16_t  __p2 
)
__ai int32x2_t vmla_n_s32 ( int32x2_t  __p0,
int32x2_t  __p1,
int32_t  __p2 
)
__ai uint16x4_t vmla_n_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1,
uint16_t  __p2 
)
__ai uint32x2_t vmla_n_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1,
uint32_t  __p2 
)
__ai int16x4_t vmla_s16 ( int16x4_t  __p0,
int16x4_t  __p1,
int16x4_t  __p2 
)
__ai int32x2_t vmla_s32 ( int32x2_t  __p0,
int32x2_t  __p1,
int32x2_t  __p2 
)
__ai int8x8_t vmla_s8 ( int8x8_t  __p0,
int8x8_t  __p1,
int8x8_t  __p2 
)
__ai uint16x4_t vmla_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1,
uint16x4_t  __p2 
)
__ai uint32x2_t vmla_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1,
uint32x2_t  __p2 
)
__ai uint8x8_t vmla_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1,
uint8x8_t  __p2 
)
__ai int32x4_t vmlal_n_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16_t  __p2 
)
__ai int64x2_t vmlal_n_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32_t  __p2 
)
__ai uint32x4_t vmlal_n_u16 ( uint32x4_t  __p0,
uint16x4_t  __p1,
uint16_t  __p2 
)
__ai uint64x2_t vmlal_n_u32 ( uint64x2_t  __p0,
uint32x2_t  __p1,
uint32_t  __p2 
)
__ai int32x4_t vmlal_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16x4_t  __p2 
)
__ai int64x2_t vmlal_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32x2_t  __p2 
)
__ai int16x8_t vmlal_s8 ( int16x8_t  __p0,
int8x8_t  __p1,
int8x8_t  __p2 
)
__ai uint32x4_t vmlal_u16 ( uint32x4_t  __p0,
uint16x4_t  __p1,
uint16x4_t  __p2 
)
__ai uint64x2_t vmlal_u32 ( uint64x2_t  __p0,
uint32x2_t  __p1,
uint32x2_t  __p2 
)
__ai uint16x8_t vmlal_u8 ( uint16x8_t  __p0,
uint8x8_t  __p1,
uint8x8_t  __p2 
)
__ai float32x4_t vmlaq_f32 ( float32x4_t  __p0,
float32x4_t  __p1,
float32x4_t  __p2 
)
__ai float32x4_t vmlaq_n_f32 ( float32x4_t  __p0,
float32x4_t  __p1,
float32_t  __p2 
)
__ai int16x8_t vmlaq_n_s16 ( int16x8_t  __p0,
int16x8_t  __p1,
int16_t  __p2 
)
__ai int32x4_t vmlaq_n_s32 ( int32x4_t  __p0,
int32x4_t  __p1,
int32_t  __p2 
)
__ai uint16x8_t vmlaq_n_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1,
uint16_t  __p2 
)
__ai uint32x4_t vmlaq_n_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1,
uint32_t  __p2 
)
__ai int16x8_t vmlaq_s16 ( int16x8_t  __p0,
int16x8_t  __p1,
int16x8_t  __p2 
)
__ai int32x4_t vmlaq_s32 ( int32x4_t  __p0,
int32x4_t  __p1,
int32x4_t  __p2 
)
__ai int8x16_t vmlaq_s8 ( int8x16_t  __p0,
int8x16_t  __p1,
int8x16_t  __p2 
)
__ai uint16x8_t vmlaq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1,
uint16x8_t  __p2 
)
__ai uint32x4_t vmlaq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1,
uint32x4_t  __p2 
)
__ai uint8x16_t vmlaq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1,
uint8x16_t  __p2 
)
__ai float32x2_t vmls_f32 ( float32x2_t  __p0,
float32x2_t  __p1,
float32x2_t  __p2 
)
__ai float32x2_t vmls_n_f32 ( float32x2_t  __p0,
float32x2_t  __p1,
float32_t  __p2 
)
__ai int16x4_t vmls_n_s16 ( int16x4_t  __p0,
int16x4_t  __p1,
int16_t  __p2 
)
__ai int32x2_t vmls_n_s32 ( int32x2_t  __p0,
int32x2_t  __p1,
int32_t  __p2 
)
__ai uint16x4_t vmls_n_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1,
uint16_t  __p2 
)
__ai uint32x2_t vmls_n_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1,
uint32_t  __p2 
)
__ai int16x4_t vmls_s16 ( int16x4_t  __p0,
int16x4_t  __p1,
int16x4_t  __p2 
)
__ai int32x2_t vmls_s32 ( int32x2_t  __p0,
int32x2_t  __p1,
int32x2_t  __p2 
)
__ai int8x8_t vmls_s8 ( int8x8_t  __p0,
int8x8_t  __p1,
int8x8_t  __p2 
)
__ai uint16x4_t vmls_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1,
uint16x4_t  __p2 
)
__ai uint32x2_t vmls_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1,
uint32x2_t  __p2 
)
__ai uint8x8_t vmls_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1,
uint8x8_t  __p2 
)
__ai int32x4_t vmlsl_n_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16_t  __p2 
)
__ai int64x2_t vmlsl_n_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32_t  __p2 
)
__ai uint32x4_t vmlsl_n_u16 ( uint32x4_t  __p0,
uint16x4_t  __p1,
uint16_t  __p2 
)
__ai uint64x2_t vmlsl_n_u32 ( uint64x2_t  __p0,
uint32x2_t  __p1,
uint32_t  __p2 
)
__ai int32x4_t vmlsl_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16x4_t  __p2 
)
__ai int64x2_t vmlsl_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32x2_t  __p2 
)
__ai int16x8_t vmlsl_s8 ( int16x8_t  __p0,
int8x8_t  __p1,
int8x8_t  __p2 
)
__ai uint32x4_t vmlsl_u16 ( uint32x4_t  __p0,
uint16x4_t  __p1,
uint16x4_t  __p2 
)
__ai uint64x2_t vmlsl_u32 ( uint64x2_t  __p0,
uint32x2_t  __p1,
uint32x2_t  __p2 
)
__ai uint16x8_t vmlsl_u8 ( uint16x8_t  __p0,
uint8x8_t  __p1,
uint8x8_t  __p2 
)
__ai float32x4_t vmlsq_f32 ( float32x4_t  __p0,
float32x4_t  __p1,
float32x4_t  __p2 
)
__ai float32x4_t vmlsq_n_f32 ( float32x4_t  __p0,
float32x4_t  __p1,
float32_t  __p2 
)
__ai int16x8_t vmlsq_n_s16 ( int16x8_t  __p0,
int16x8_t  __p1,
int16_t  __p2 
)
__ai int32x4_t vmlsq_n_s32 ( int32x4_t  __p0,
int32x4_t  __p1,
int32_t  __p2 
)
__ai uint16x8_t vmlsq_n_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1,
uint16_t  __p2 
)
__ai uint32x4_t vmlsq_n_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1,
uint32_t  __p2 
)
__ai int16x8_t vmlsq_s16 ( int16x8_t  __p0,
int16x8_t  __p1,
int16x8_t  __p2 
)
__ai int32x4_t vmlsq_s32 ( int32x4_t  __p0,
int32x4_t  __p1,
int32x4_t  __p2 
)
__ai int8x16_t vmlsq_s8 ( int8x16_t  __p0,
int8x16_t  __p1,
int8x16_t  __p2 
)
__ai uint16x8_t vmlsq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1,
uint16x8_t  __p2 
)
__ai uint32x4_t vmlsq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1,
uint32x4_t  __p2 
)
__ai uint8x16_t vmlsq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1,
uint8x16_t  __p2 
)
__ai float32x2_t vmov_n_f32 ( float32_t  __p0)
__ai poly16x4_t vmov_n_p16 ( poly16_t  __p0)
__ai poly8x8_t vmov_n_p8 ( poly8_t  __p0)
__ai int16x4_t vmov_n_s16 ( int16_t  __p0)
__ai int32x2_t vmov_n_s32 ( int32_t  __p0)
__ai int64x1_t vmov_n_s64 ( int64_t  __p0)
__ai int8x8_t vmov_n_s8 ( int8_t  __p0)
__ai uint16x4_t vmov_n_u16 ( uint16_t  __p0)
__ai uint32x2_t vmov_n_u32 ( uint32_t  __p0)
__ai uint64x1_t vmov_n_u64 ( uint64_t  __p0)
__ai uint8x8_t vmov_n_u8 ( uint8_t  __p0)
__ai int32x4_t vmovl_s16 ( int16x4_t  __p0)
__ai int64x2_t vmovl_s32 ( int32x2_t  __p0)
__ai int16x8_t vmovl_s8 ( int8x8_t  __p0)
__ai uint32x4_t vmovl_u16 ( uint16x4_t  __p0)
__ai uint64x2_t vmovl_u32 ( uint32x2_t  __p0)
__ai uint16x8_t vmovl_u8 ( uint8x8_t  __p0)
__ai int8x8_t vmovn_s16 ( int16x8_t  __p0)
__ai int16x4_t vmovn_s32 ( int32x4_t  __p0)
__ai int32x2_t vmovn_s64 ( int64x2_t  __p0)
__ai uint8x8_t vmovn_u16 ( uint16x8_t  __p0)
__ai uint16x4_t vmovn_u32 ( uint32x4_t  __p0)
__ai uint32x2_t vmovn_u64 ( uint64x2_t  __p0)
__ai float32x4_t vmovq_n_f32 ( float32_t  __p0)
__ai poly16x8_t vmovq_n_p16 ( poly16_t  __p0)
__ai poly8x16_t vmovq_n_p8 ( poly8_t  __p0)
__ai int16x8_t vmovq_n_s16 ( int16_t  __p0)
__ai int32x4_t vmovq_n_s32 ( int32_t  __p0)
__ai int64x2_t vmovq_n_s64 ( int64_t  __p0)
__ai int8x16_t vmovq_n_s8 ( int8_t  __p0)
__ai uint16x8_t vmovq_n_u16 ( uint16_t  __p0)
__ai uint32x4_t vmovq_n_u32 ( uint32_t  __p0)
__ai uint64x2_t vmovq_n_u64 ( uint64_t  __p0)
__ai uint8x16_t vmovq_n_u8 ( uint8_t  __p0)
__ai float32x2_t vmul_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai float32x2_t vmul_n_f32 ( float32x2_t  __p0,
float32_t  __p1 
)
__ai int16x4_t vmul_n_s16 ( int16x4_t  __p0,
int16_t  __p1 
)
__ai int32x2_t vmul_n_s32 ( int32x2_t  __p0,
int32_t  __p1 
)
__ai uint16x4_t vmul_n_u16 ( uint16x4_t  __p0,
uint16_t  __p1 
)
__ai uint32x2_t vmul_n_u32 ( uint32x2_t  __p0,
uint32_t  __p1 
)
__ai poly8x8_t vmul_p8 ( poly8x8_t  __p0,
poly8x8_t  __p1 
)
__ai int16x4_t vmul_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vmul_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8_t vmul_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vmul_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vmul_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vmul_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int32x4_t vmull_n_s16 ( int16x4_t  __p0,
int16_t  __p1 
)
__ai int64x2_t vmull_n_s32 ( int32x2_t  __p0,
int32_t  __p1 
)
__ai uint32x4_t vmull_n_u16 ( uint16x4_t  __p0,
uint16_t  __p1 
)
__ai uint64x2_t vmull_n_u32 ( uint32x2_t  __p0,
uint32_t  __p1 
)
__ai poly16x8_t vmull_p8 ( poly8x8_t  __p0,
poly8x8_t  __p1 
)
__ai int32x4_t vmull_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int64x2_t vmull_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int16x8_t vmull_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint32x4_t vmull_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint64x2_t vmull_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint16x8_t vmull_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai float32x4_t vmulq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai float32x4_t vmulq_n_f32 ( float32x4_t  __p0,
float32_t  __p1 
)
__ai int16x8_t vmulq_n_s16 ( int16x8_t  __p0,
int16_t  __p1 
)
__ai int32x4_t vmulq_n_s32 ( int32x4_t  __p0,
int32_t  __p1 
)
__ai uint16x8_t vmulq_n_u16 ( uint16x8_t  __p0,
uint16_t  __p1 
)
__ai uint32x4_t vmulq_n_u32 ( uint32x4_t  __p0,
uint32_t  __p1 
)
__ai poly8x16_t vmulq_p8 ( poly8x16_t  __p0,
poly8x16_t  __p1 
)
__ai int16x8_t vmulq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vmulq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int8x16_t vmulq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vmulq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vmulq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16_t vmulq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai poly8x8_t vmvn_p8 ( poly8x8_t  __p0)
__ai int16x4_t vmvn_s16 ( int16x4_t  __p0)
__ai int32x2_t vmvn_s32 ( int32x2_t  __p0)
__ai int8x8_t vmvn_s8 ( int8x8_t  __p0)
__ai uint16x4_t vmvn_u16 ( uint16x4_t  __p0)
__ai uint32x2_t vmvn_u32 ( uint32x2_t  __p0)
__ai uint8x8_t vmvn_u8 ( uint8x8_t  __p0)
__ai poly8x16_t vmvnq_p8 ( poly8x16_t  __p0)
__ai int16x8_t vmvnq_s16 ( int16x8_t  __p0)
__ai int32x4_t vmvnq_s32 ( int32x4_t  __p0)
__ai int8x16_t vmvnq_s8 ( int8x16_t  __p0)
__ai uint16x8_t vmvnq_u16 ( uint16x8_t  __p0)
__ai uint32x4_t vmvnq_u32 ( uint32x4_t  __p0)
__ai uint8x16_t vmvnq_u8 ( uint8x16_t  __p0)
__ai float32x2_t vneg_f32 ( float32x2_t  __p0)
__ai int16x4_t vneg_s16 ( int16x4_t  __p0)
__ai int32x2_t vneg_s32 ( int32x2_t  __p0)
__ai int8x8_t vneg_s8 ( int8x8_t  __p0)
__ai float32x4_t vnegq_f32 ( float32x4_t  __p0)
__ai int16x8_t vnegq_s16 ( int16x8_t  __p0)
__ai int32x4_t vnegq_s32 ( int32x4_t  __p0)
__ai int8x16_t vnegq_s8 ( int8x16_t  __p0)
__ai int16x4_t vorn_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vorn_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int64x1_t vorn_s64 ( int64x1_t  __p0,
int64x1_t  __p1 
)
__ai int8x8_t vorn_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vorn_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vorn_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint64x1_t vorn_u64 ( uint64x1_t  __p0,
uint64x1_t  __p1 
)
__ai uint8x8_t vorn_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int16x8_t vornq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vornq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int64x2_t vornq_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai int8x16_t vornq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vornq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vornq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint64x2_t vornq_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai uint8x16_t vornq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai int16x4_t vorr_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vorr_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int64x1_t vorr_s64 ( int64x1_t  __p0,
int64x1_t  __p1 
)
__ai int8x8_t vorr_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vorr_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vorr_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint64x1_t vorr_u64 ( uint64x1_t  __p0,
uint64x1_t  __p1 
)
__ai uint8x8_t vorr_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int16x8_t vorrq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vorrq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int64x2_t vorrq_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai int8x16_t vorrq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vorrq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vorrq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint64x2_t vorrq_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai uint8x16_t vorrq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai int32x2_t vpadal_s16 ( int32x2_t  __p0,
int16x4_t  __p1 
)
__ai int64x1_t vpadal_s32 ( int64x1_t  __p0,
int32x2_t  __p1 
)
__ai int16x4_t vpadal_s8 ( int16x4_t  __p0,
int8x8_t  __p1 
)
__ai uint32x2_t vpadal_u16 ( uint32x2_t  __p0,
uint16x4_t  __p1 
)
__ai uint64x1_t vpadal_u32 ( uint64x1_t  __p0,
uint32x2_t  __p1 
)
__ai uint16x4_t vpadal_u8 ( uint16x4_t  __p0,
uint8x8_t  __p1 
)
__ai int32x4_t vpadalq_s16 ( int32x4_t  __p0,
int16x8_t  __p1 
)
__ai int64x2_t vpadalq_s32 ( int64x2_t  __p0,
int32x4_t  __p1 
)
__ai int16x8_t vpadalq_s8 ( int16x8_t  __p0,
int8x16_t  __p1 
)
__ai uint32x4_t vpadalq_u16 ( uint32x4_t  __p0,
uint16x8_t  __p1 
)
__ai uint64x2_t vpadalq_u32 ( uint64x2_t  __p0,
uint32x4_t  __p1 
)
__ai uint16x8_t vpadalq_u8 ( uint16x8_t  __p0,
uint8x16_t  __p1 
)
__ai float32x2_t vpadd_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai int16x4_t vpadd_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vpadd_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8_t vpadd_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vpadd_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vpadd_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vpadd_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int32x2_t vpaddl_s16 ( int16x4_t  __p0)
__ai int64x1_t vpaddl_s32 ( int32x2_t  __p0)
__ai int16x4_t vpaddl_s8 ( int8x8_t  __p0)
__ai uint32x2_t vpaddl_u16 ( uint16x4_t  __p0)
__ai uint64x1_t vpaddl_u32 ( uint32x2_t  __p0)
__ai uint16x4_t vpaddl_u8 ( uint8x8_t  __p0)
__ai int32x4_t vpaddlq_s16 ( int16x8_t  __p0)
__ai int64x2_t vpaddlq_s32 ( int32x4_t  __p0)
__ai int16x8_t vpaddlq_s8 ( int8x16_t  __p0)
__ai uint32x4_t vpaddlq_u16 ( uint16x8_t  __p0)
__ai uint64x2_t vpaddlq_u32 ( uint32x4_t  __p0)
__ai uint16x8_t vpaddlq_u8 ( uint8x16_t  __p0)
__ai float32x2_t vpmax_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai int16x4_t vpmax_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vpmax_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8_t vpmax_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vpmax_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vpmax_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vpmax_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai float32x2_t vpmin_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai int16x4_t vpmin_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vpmin_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8_t vpmin_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vpmin_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vpmin_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vpmin_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int16x4_t vqabs_s16 ( int16x4_t  __p0)
__ai int32x2_t vqabs_s32 ( int32x2_t  __p0)
__ai int8x8_t vqabs_s8 ( int8x8_t  __p0)
__ai int16x8_t vqabsq_s16 ( int16x8_t  __p0)
__ai int32x4_t vqabsq_s32 ( int32x4_t  __p0)
__ai int8x16_t vqabsq_s8 ( int8x16_t  __p0)
__ai int16x4_t vqadd_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vqadd_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int64x1_t vqadd_s64 ( int64x1_t  __p0,
int64x1_t  __p1 
)
__ai int8x8_t vqadd_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vqadd_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vqadd_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint64x1_t vqadd_u64 ( uint64x1_t  __p0,
uint64x1_t  __p1 
)
__ai uint8x8_t vqadd_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int16x8_t vqaddq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vqaddq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int64x2_t vqaddq_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai int8x16_t vqaddq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vqaddq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vqaddq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint64x2_t vqaddq_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai uint8x16_t vqaddq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai int32x4_t vqdmlal_n_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16_t  __p2 
)
__ai int64x2_t vqdmlal_n_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32_t  __p2 
)
__ai int32x4_t vqdmlal_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16x4_t  __p2 
)
__ai int64x2_t vqdmlal_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32x2_t  __p2 
)
__ai int32x4_t vqdmlsl_n_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16_t  __p2 
)
__ai int64x2_t vqdmlsl_n_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32_t  __p2 
)
__ai int32x4_t vqdmlsl_s16 ( int32x4_t  __p0,
int16x4_t  __p1,
int16x4_t  __p2 
)
__ai int64x2_t vqdmlsl_s32 ( int64x2_t  __p0,
int32x2_t  __p1,
int32x2_t  __p2 
)
__ai int16x4_t vqdmulh_n_s16 ( int16x4_t  __p0,
int16_t  __p1 
)
__ai int32x2_t vqdmulh_n_s32 ( int32x2_t  __p0,
int32_t  __p1 
)
__ai int16x4_t vqdmulh_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vqdmulh_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int16x8_t vqdmulhq_n_s16 ( int16x8_t  __p0,
int16_t  __p1 
)
__ai int32x4_t vqdmulhq_n_s32 ( int32x4_t  __p0,
int32_t  __p1 
)
__ai int16x8_t vqdmulhq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vqdmulhq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int32x4_t vqdmull_n_s16 ( int16x4_t  __p0,
int16_t  __p1 
)
__ai int64x2_t vqdmull_n_s32 ( int32x2_t  __p0,
int32_t  __p1 
)
__ai int32x4_t vqdmull_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int64x2_t vqdmull_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8_t vqmovn_s16 ( int16x8_t  __p0)
__ai int16x4_t vqmovn_s32 ( int32x4_t  __p0)
__ai int32x2_t vqmovn_s64 ( int64x2_t  __p0)
__ai uint8x8_t vqmovn_u16 ( uint16x8_t  __p0)
__ai uint16x4_t vqmovn_u32 ( uint32x4_t  __p0)
__ai uint32x2_t vqmovn_u64 ( uint64x2_t  __p0)
__ai uint8x8_t vqmovun_s16 ( int16x8_t  __p0)
__ai uint16x4_t vqmovun_s32 ( int32x4_t  __p0)
__ai uint32x2_t vqmovun_s64 ( int64x2_t  __p0)
__ai int16x4_t vqneg_s16 ( int16x4_t  __p0)
__ai int32x2_t vqneg_s32 ( int32x2_t  __p0)
__ai int8x8_t vqneg_s8 ( int8x8_t  __p0)
__ai int16x8_t vqnegq_s16 ( int16x8_t  __p0)
__ai int32x4_t vqnegq_s32 ( int32x4_t  __p0)
__ai int8x16_t vqnegq_s8 ( int8x16_t  __p0)
__ai int16x4_t vqrdmulh_n_s16 ( int16x4_t  __p0,
int16_t  __p1 
)
__ai int32x2_t vqrdmulh_n_s32 ( int32x2_t  __p0,
int32_t  __p1 
)
__ai int16x4_t vqrdmulh_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vqrdmulh_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int16x8_t vqrdmulhq_n_s16 ( int16x8_t  __p0,
int16_t  __p1 
)
__ai int32x4_t vqrdmulhq_n_s32 ( int32x4_t  __p0,
int32_t  __p1 
)
__ai int16x8_t vqrdmulhq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vqrdmulhq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int16x4_t vqrshl_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vqrshl_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int64x1_t vqrshl_s64 ( int64x1_t  __p0,
int64x1_t  __p1 
)
__ai int8x8_t vqrshl_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vqrshl_u16 ( uint16x4_t  __p0,
int16x4_t  __p1 
)
__ai uint32x2_t vqrshl_u32 ( uint32x2_t  __p0,
int32x2_t  __p1 
)
__ai uint64x1_t vqrshl_u64 ( uint64x1_t  __p0,
int64x1_t  __p1 
)
__ai uint8x8_t vqrshl_u8 ( uint8x8_t  __p0,
int8x8_t  __p1 
)
__ai int16x8_t vqrshlq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vqrshlq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int64x2_t vqrshlq_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai int8x16_t vqrshlq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vqrshlq_u16 ( uint16x8_t  __p0,
int16x8_t  __p1 
)
__ai uint32x4_t vqrshlq_u32 ( uint32x4_t  __p0,
int32x4_t  __p1 
)
__ai uint64x2_t vqrshlq_u64 ( uint64x2_t  __p0,
int64x2_t  __p1 
)
__ai uint8x16_t vqrshlq_u8 ( uint8x16_t  __p0,
int8x16_t  __p1 
)
__ai int16x4_t vqshl_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vqshl_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int64x1_t vqshl_s64 ( int64x1_t  __p0,
int64x1_t  __p1 
)
__ai int8x8_t vqshl_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vqshl_u16 ( uint16x4_t  __p0,
int16x4_t  __p1 
)
__ai uint32x2_t vqshl_u32 ( uint32x2_t  __p0,
int32x2_t  __p1 
)
__ai uint64x1_t vqshl_u64 ( uint64x1_t  __p0,
int64x1_t  __p1 
)
__ai uint8x8_t vqshl_u8 ( uint8x8_t  __p0,
int8x8_t  __p1 
)
__ai int16x8_t vqshlq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vqshlq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int64x2_t vqshlq_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai int8x16_t vqshlq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vqshlq_u16 ( uint16x8_t  __p0,
int16x8_t  __p1 
)
__ai uint32x4_t vqshlq_u32 ( uint32x4_t  __p0,
int32x4_t  __p1 
)
__ai uint64x2_t vqshlq_u64 ( uint64x2_t  __p0,
int64x2_t  __p1 
)
__ai uint8x16_t vqshlq_u8 ( uint8x16_t  __p0,
int8x16_t  __p1 
)
__ai int16x4_t vqsub_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vqsub_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int64x1_t vqsub_s64 ( int64x1_t  __p0,
int64x1_t  __p1 
)
__ai int8x8_t vqsub_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vqsub_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vqsub_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint64x1_t vqsub_u64 ( uint64x1_t  __p0,
uint64x1_t  __p1 
)
__ai uint8x8_t vqsub_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int16x8_t vqsubq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vqsubq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int64x2_t vqsubq_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai int8x16_t vqsubq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vqsubq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vqsubq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint64x2_t vqsubq_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai uint8x16_t vqsubq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai int8x8_t vraddhn_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int16x4_t vraddhn_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int32x2_t vraddhn_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai uint8x8_t vraddhn_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint16x4_t vraddhn_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint32x2_t vraddhn_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai float32x2_t vrecpe_f32 ( float32x2_t  __p0)
__ai uint32x2_t vrecpe_u32 ( uint32x2_t  __p0)
__ai float32x4_t vrecpeq_f32 ( float32x4_t  __p0)
__ai uint32x4_t vrecpeq_u32 ( uint32x4_t  __p0)
__ai float32x2_t vrecps_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai float32x4_t vrecpsq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai float16x4_t vreinterpret_f16_f32 ( float32x2_t  __p0)
__ai float16x4_t vreinterpret_f16_p16 ( poly16x4_t  __p0)
__ai float16x4_t vreinterpret_f16_p8 ( poly8x8_t  __p0)
__ai float16x4_t vreinterpret_f16_s16 ( int16x4_t  __p0)
__ai float16x4_t vreinterpret_f16_s32 ( int32x2_t  __p0)
__ai float16x4_t vreinterpret_f16_s64 ( int64x1_t  __p0)
__ai float16x4_t vreinterpret_f16_s8 ( int8x8_t  __p0)
__ai float16x4_t vreinterpret_f16_u16 ( uint16x4_t  __p0)
__ai float16x4_t vreinterpret_f16_u32 ( uint32x2_t  __p0)
__ai float16x4_t vreinterpret_f16_u64 ( uint64x1_t  __p0)
__ai float16x4_t vreinterpret_f16_u8 ( uint8x8_t  __p0)
__ai float32x2_t vreinterpret_f32_f16 ( float16x4_t  __p0)
__ai float32x2_t vreinterpret_f32_p16 ( poly16x4_t  __p0)
__ai float32x2_t vreinterpret_f32_p8 ( poly8x8_t  __p0)
__ai float32x2_t vreinterpret_f32_s16 ( int16x4_t  __p0)
__ai float32x2_t vreinterpret_f32_s32 ( int32x2_t  __p0)
__ai float32x2_t vreinterpret_f32_s64 ( int64x1_t  __p0)
__ai float32x2_t vreinterpret_f32_s8 ( int8x8_t  __p0)
__ai float32x2_t vreinterpret_f32_u16 ( uint16x4_t  __p0)
__ai float32x2_t vreinterpret_f32_u32 ( uint32x2_t  __p0)
__ai float32x2_t vreinterpret_f32_u64 ( uint64x1_t  __p0)
__ai float32x2_t vreinterpret_f32_u8 ( uint8x8_t  __p0)
__ai poly16x4_t vreinterpret_p16_f16 ( float16x4_t  __p0)
__ai poly16x4_t vreinterpret_p16_f32 ( float32x2_t  __p0)
__ai poly16x4_t vreinterpret_p16_p8 ( poly8x8_t  __p0)
__ai poly16x4_t vreinterpret_p16_s16 ( int16x4_t  __p0)
__ai poly16x4_t vreinterpret_p16_s32 ( int32x2_t  __p0)
__ai poly16x4_t vreinterpret_p16_s64 ( int64x1_t  __p0)
__ai poly16x4_t vreinterpret_p16_s8 ( int8x8_t  __p0)
__ai poly16x4_t vreinterpret_p16_u16 ( uint16x4_t  __p0)
__ai poly16x4_t vreinterpret_p16_u32 ( uint32x2_t  __p0)
__ai poly16x4_t vreinterpret_p16_u64 ( uint64x1_t  __p0)
__ai poly16x4_t vreinterpret_p16_u8 ( uint8x8_t  __p0)
__ai poly8x8_t vreinterpret_p8_f16 ( float16x4_t  __p0)
__ai poly8x8_t vreinterpret_p8_f32 ( float32x2_t  __p0)
__ai poly8x8_t vreinterpret_p8_p16 ( poly16x4_t  __p0)
__ai poly8x8_t vreinterpret_p8_s16 ( int16x4_t  __p0)
__ai poly8x8_t vreinterpret_p8_s32 ( int32x2_t  __p0)
__ai poly8x8_t vreinterpret_p8_s64 ( int64x1_t  __p0)
__ai poly8x8_t vreinterpret_p8_s8 ( int8x8_t  __p0)
__ai poly8x8_t vreinterpret_p8_u16 ( uint16x4_t  __p0)
__ai poly8x8_t vreinterpret_p8_u32 ( uint32x2_t  __p0)
__ai poly8x8_t vreinterpret_p8_u64 ( uint64x1_t  __p0)
__ai poly8x8_t vreinterpret_p8_u8 ( uint8x8_t  __p0)
__ai int16x4_t vreinterpret_s16_f16 ( float16x4_t  __p0)
__ai int16x4_t vreinterpret_s16_f32 ( float32x2_t  __p0)
__ai int16x4_t vreinterpret_s16_p16 ( poly16x4_t  __p0)
__ai int16x4_t vreinterpret_s16_p8 ( poly8x8_t  __p0)
__ai int16x4_t vreinterpret_s16_s32 ( int32x2_t  __p0)
__ai int16x4_t vreinterpret_s16_s64 ( int64x1_t  __p0)
__ai int16x4_t vreinterpret_s16_s8 ( int8x8_t  __p0)
__ai int16x4_t vreinterpret_s16_u16 ( uint16x4_t  __p0)
__ai int16x4_t vreinterpret_s16_u32 ( uint32x2_t  __p0)
__ai int16x4_t vreinterpret_s16_u64 ( uint64x1_t  __p0)
__ai int16x4_t vreinterpret_s16_u8 ( uint8x8_t  __p0)
__ai int32x2_t vreinterpret_s32_f16 ( float16x4_t  __p0)
__ai int32x2_t vreinterpret_s32_f32 ( float32x2_t  __p0)
__ai int32x2_t vreinterpret_s32_p16 ( poly16x4_t  __p0)
__ai int32x2_t vreinterpret_s32_p8 ( poly8x8_t  __p0)
__ai int32x2_t vreinterpret_s32_s16 ( int16x4_t  __p0)
__ai int32x2_t vreinterpret_s32_s64 ( int64x1_t  __p0)
__ai int32x2_t vreinterpret_s32_s8 ( int8x8_t  __p0)
__ai int32x2_t vreinterpret_s32_u16 ( uint16x4_t  __p0)
__ai int32x2_t vreinterpret_s32_u32 ( uint32x2_t  __p0)
__ai int32x2_t vreinterpret_s32_u64 ( uint64x1_t  __p0)
__ai int32x2_t vreinterpret_s32_u8 ( uint8x8_t  __p0)
__ai int64x1_t vreinterpret_s64_f16 ( float16x4_t  __p0)
__ai int64x1_t vreinterpret_s64_f32 ( float32x2_t  __p0)
__ai int64x1_t vreinterpret_s64_p16 ( poly16x4_t  __p0)
__ai int64x1_t vreinterpret_s64_p8 ( poly8x8_t  __p0)
__ai int64x1_t vreinterpret_s64_s16 ( int16x4_t  __p0)
__ai int64x1_t vreinterpret_s64_s32 ( int32x2_t  __p0)
__ai int64x1_t vreinterpret_s64_s8 ( int8x8_t  __p0)
__ai int64x1_t vreinterpret_s64_u16 ( uint16x4_t  __p0)
__ai int64x1_t vreinterpret_s64_u32 ( uint32x2_t  __p0)
__ai int64x1_t vreinterpret_s64_u64 ( uint64x1_t  __p0)
__ai int64x1_t vreinterpret_s64_u8 ( uint8x8_t  __p0)
__ai int8x8_t vreinterpret_s8_f16 ( float16x4_t  __p0)
__ai int8x8_t vreinterpret_s8_f32 ( float32x2_t  __p0)
__ai int8x8_t vreinterpret_s8_p16 ( poly16x4_t  __p0)
__ai int8x8_t vreinterpret_s8_p8 ( poly8x8_t  __p0)
__ai int8x8_t vreinterpret_s8_s16 ( int16x4_t  __p0)
__ai int8x8_t vreinterpret_s8_s32 ( int32x2_t  __p0)
__ai int8x8_t vreinterpret_s8_s64 ( int64x1_t  __p0)
__ai int8x8_t vreinterpret_s8_u16 ( uint16x4_t  __p0)
__ai int8x8_t vreinterpret_s8_u32 ( uint32x2_t  __p0)
__ai int8x8_t vreinterpret_s8_u64 ( uint64x1_t  __p0)
__ai int8x8_t vreinterpret_s8_u8 ( uint8x8_t  __p0)
__ai uint16x4_t vreinterpret_u16_f16 ( float16x4_t  __p0)
__ai uint16x4_t vreinterpret_u16_f32 ( float32x2_t  __p0)
__ai uint16x4_t vreinterpret_u16_p16 ( poly16x4_t  __p0)
__ai uint16x4_t vreinterpret_u16_p8 ( poly8x8_t  __p0)
__ai uint16x4_t vreinterpret_u16_s16 ( int16x4_t  __p0)
__ai uint16x4_t vreinterpret_u16_s32 ( int32x2_t  __p0)
__ai uint16x4_t vreinterpret_u16_s64 ( int64x1_t  __p0)
__ai uint16x4_t vreinterpret_u16_s8 ( int8x8_t  __p0)
__ai uint16x4_t vreinterpret_u16_u32 ( uint32x2_t  __p0)
__ai uint16x4_t vreinterpret_u16_u64 ( uint64x1_t  __p0)
__ai uint16x4_t vreinterpret_u16_u8 ( uint8x8_t  __p0)
__ai uint32x2_t vreinterpret_u32_f16 ( float16x4_t  __p0)
__ai uint32x2_t vreinterpret_u32_f32 ( float32x2_t  __p0)
__ai uint32x2_t vreinterpret_u32_p16 ( poly16x4_t  __p0)
__ai uint32x2_t vreinterpret_u32_p8 ( poly8x8_t  __p0)
__ai uint32x2_t vreinterpret_u32_s16 ( int16x4_t  __p0)
__ai uint32x2_t vreinterpret_u32_s32 ( int32x2_t  __p0)
__ai uint32x2_t vreinterpret_u32_s64 ( int64x1_t  __p0)
__ai uint32x2_t vreinterpret_u32_s8 ( int8x8_t  __p0)
__ai uint32x2_t vreinterpret_u32_u16 ( uint16x4_t  __p0)
__ai uint32x2_t vreinterpret_u32_u64 ( uint64x1_t  __p0)
__ai uint32x2_t vreinterpret_u32_u8 ( uint8x8_t  __p0)
__ai uint64x1_t vreinterpret_u64_f16 ( float16x4_t  __p0)
__ai uint64x1_t vreinterpret_u64_f32 ( float32x2_t  __p0)
__ai uint64x1_t vreinterpret_u64_p16 ( poly16x4_t  __p0)
__ai uint64x1_t vreinterpret_u64_p8 ( poly8x8_t  __p0)
__ai uint64x1_t vreinterpret_u64_s16 ( int16x4_t  __p0)
__ai uint64x1_t vreinterpret_u64_s32 ( int32x2_t  __p0)
__ai uint64x1_t vreinterpret_u64_s64 ( int64x1_t  __p0)
__ai uint64x1_t vreinterpret_u64_s8 ( int8x8_t  __p0)
__ai uint64x1_t vreinterpret_u64_u16 ( uint16x4_t  __p0)
__ai uint64x1_t vreinterpret_u64_u32 ( uint32x2_t  __p0)
__ai uint64x1_t vreinterpret_u64_u8 ( uint8x8_t  __p0)
__ai uint8x8_t vreinterpret_u8_f16 ( float16x4_t  __p0)
__ai uint8x8_t vreinterpret_u8_f32 ( float32x2_t  __p0)
__ai uint8x8_t vreinterpret_u8_p16 ( poly16x4_t  __p0)
__ai uint8x8_t vreinterpret_u8_p8 ( poly8x8_t  __p0)
__ai uint8x8_t vreinterpret_u8_s16 ( int16x4_t  __p0)
__ai uint8x8_t vreinterpret_u8_s32 ( int32x2_t  __p0)
__ai uint8x8_t vreinterpret_u8_s64 ( int64x1_t  __p0)
__ai uint8x8_t vreinterpret_u8_s8 ( int8x8_t  __p0)
__ai uint8x8_t vreinterpret_u8_u16 ( uint16x4_t  __p0)
__ai uint8x8_t vreinterpret_u8_u32 ( uint32x2_t  __p0)
__ai uint8x8_t vreinterpret_u8_u64 ( uint64x1_t  __p0)
__ai float16x8_t vreinterpretq_f16_f32 ( float32x4_t  __p0)
__ai float16x8_t vreinterpretq_f16_p16 ( poly16x8_t  __p0)
__ai float16x8_t vreinterpretq_f16_p8 ( poly8x16_t  __p0)
__ai float16x8_t vreinterpretq_f16_s16 ( int16x8_t  __p0)
__ai float16x8_t vreinterpretq_f16_s32 ( int32x4_t  __p0)
__ai float16x8_t vreinterpretq_f16_s64 ( int64x2_t  __p0)
__ai float16x8_t vreinterpretq_f16_s8 ( int8x16_t  __p0)
__ai float16x8_t vreinterpretq_f16_u16 ( uint16x8_t  __p0)
__ai float16x8_t vreinterpretq_f16_u32 ( uint32x4_t  __p0)
__ai float16x8_t vreinterpretq_f16_u64 ( uint64x2_t  __p0)
__ai float16x8_t vreinterpretq_f16_u8 ( uint8x16_t  __p0)
__ai float32x4_t vreinterpretq_f32_f16 ( float16x8_t  __p0)
__ai float32x4_t vreinterpretq_f32_p16 ( poly16x8_t  __p0)
__ai float32x4_t vreinterpretq_f32_p8 ( poly8x16_t  __p0)
__ai float32x4_t vreinterpretq_f32_s16 ( int16x8_t  __p0)
__ai float32x4_t vreinterpretq_f32_s32 ( int32x4_t  __p0)
__ai float32x4_t vreinterpretq_f32_s64 ( int64x2_t  __p0)
__ai float32x4_t vreinterpretq_f32_s8 ( int8x16_t  __p0)
__ai float32x4_t vreinterpretq_f32_u16 ( uint16x8_t  __p0)
__ai float32x4_t vreinterpretq_f32_u32 ( uint32x4_t  __p0)
__ai float32x4_t vreinterpretq_f32_u64 ( uint64x2_t  __p0)
__ai float32x4_t vreinterpretq_f32_u8 ( uint8x16_t  __p0)
__ai poly16x8_t vreinterpretq_p16_f16 ( float16x8_t  __p0)
__ai poly16x8_t vreinterpretq_p16_f32 ( float32x4_t  __p0)
__ai poly16x8_t vreinterpretq_p16_p8 ( poly8x16_t  __p0)
__ai poly16x8_t vreinterpretq_p16_s16 ( int16x8_t  __p0)
__ai poly16x8_t vreinterpretq_p16_s32 ( int32x4_t  __p0)
__ai poly16x8_t vreinterpretq_p16_s64 ( int64x2_t  __p0)
__ai poly16x8_t vreinterpretq_p16_s8 ( int8x16_t  __p0)
__ai poly16x8_t vreinterpretq_p16_u16 ( uint16x8_t  __p0)
__ai poly16x8_t vreinterpretq_p16_u32 ( uint32x4_t  __p0)
__ai poly16x8_t vreinterpretq_p16_u64 ( uint64x2_t  __p0)
__ai poly16x8_t vreinterpretq_p16_u8 ( uint8x16_t  __p0)
__ai poly8x16_t vreinterpretq_p8_f16 ( float16x8_t  __p0)
__ai poly8x16_t vreinterpretq_p8_f32 ( float32x4_t  __p0)
__ai poly8x16_t vreinterpretq_p8_p16 ( poly16x8_t  __p0)
__ai poly8x16_t vreinterpretq_p8_s16 ( int16x8_t  __p0)
__ai poly8x16_t vreinterpretq_p8_s32 ( int32x4_t  __p0)
__ai poly8x16_t vreinterpretq_p8_s64 ( int64x2_t  __p0)
__ai poly8x16_t vreinterpretq_p8_s8 ( int8x16_t  __p0)
__ai poly8x16_t vreinterpretq_p8_u16 ( uint16x8_t  __p0)
__ai poly8x16_t vreinterpretq_p8_u32 ( uint32x4_t  __p0)
__ai poly8x16_t vreinterpretq_p8_u64 ( uint64x2_t  __p0)
__ai poly8x16_t vreinterpretq_p8_u8 ( uint8x16_t  __p0)
__ai int16x8_t vreinterpretq_s16_f16 ( float16x8_t  __p0)
__ai int16x8_t vreinterpretq_s16_f32 ( float32x4_t  __p0)
__ai int16x8_t vreinterpretq_s16_p16 ( poly16x8_t  __p0)
__ai int16x8_t vreinterpretq_s16_p8 ( poly8x16_t  __p0)
__ai int16x8_t vreinterpretq_s16_s32 ( int32x4_t  __p0)
__ai int16x8_t vreinterpretq_s16_s64 ( int64x2_t  __p0)
__ai int16x8_t vreinterpretq_s16_s8 ( int8x16_t  __p0)
__ai int16x8_t vreinterpretq_s16_u16 ( uint16x8_t  __p0)
__ai int16x8_t vreinterpretq_s16_u32 ( uint32x4_t  __p0)
__ai int16x8_t vreinterpretq_s16_u64 ( uint64x2_t  __p0)
__ai int16x8_t vreinterpretq_s16_u8 ( uint8x16_t  __p0)
__ai int32x4_t vreinterpretq_s32_f16 ( float16x8_t  __p0)
__ai int32x4_t vreinterpretq_s32_f32 ( float32x4_t  __p0)
__ai int32x4_t vreinterpretq_s32_p16 ( poly16x8_t  __p0)
__ai int32x4_t vreinterpretq_s32_p8 ( poly8x16_t  __p0)
__ai int32x4_t vreinterpretq_s32_s16 ( int16x8_t  __p0)
__ai int32x4_t vreinterpretq_s32_s64 ( int64x2_t  __p0)
__ai int32x4_t vreinterpretq_s32_s8 ( int8x16_t  __p0)
__ai int32x4_t vreinterpretq_s32_u16 ( uint16x8_t  __p0)
__ai int32x4_t vreinterpretq_s32_u32 ( uint32x4_t  __p0)
__ai int32x4_t vreinterpretq_s32_u64 ( uint64x2_t  __p0)
__ai int32x4_t vreinterpretq_s32_u8 ( uint8x16_t  __p0)
__ai int64x2_t vreinterpretq_s64_f16 ( float16x8_t  __p0)
__ai int64x2_t vreinterpretq_s64_f32 ( float32x4_t  __p0)
__ai int64x2_t vreinterpretq_s64_p16 ( poly16x8_t  __p0)
__ai int64x2_t vreinterpretq_s64_p8 ( poly8x16_t  __p0)
__ai int64x2_t vreinterpretq_s64_s16 ( int16x8_t  __p0)
__ai int64x2_t vreinterpretq_s64_s32 ( int32x4_t  __p0)
__ai int64x2_t vreinterpretq_s64_s8 ( int8x16_t  __p0)
__ai int64x2_t vreinterpretq_s64_u16 ( uint16x8_t  __p0)
__ai int64x2_t vreinterpretq_s64_u32 ( uint32x4_t  __p0)
__ai int64x2_t vreinterpretq_s64_u64 ( uint64x2_t  __p0)
__ai int64x2_t vreinterpretq_s64_u8 ( uint8x16_t  __p0)
__ai int8x16_t vreinterpretq_s8_f16 ( float16x8_t  __p0)
__ai int8x16_t vreinterpretq_s8_f32 ( float32x4_t  __p0)
__ai int8x16_t vreinterpretq_s8_p16 ( poly16x8_t  __p0)
__ai int8x16_t vreinterpretq_s8_p8 ( poly8x16_t  __p0)
__ai int8x16_t vreinterpretq_s8_s16 ( int16x8_t  __p0)
__ai int8x16_t vreinterpretq_s8_s32 ( int32x4_t  __p0)
__ai int8x16_t vreinterpretq_s8_s64 ( int64x2_t  __p0)
__ai int8x16_t vreinterpretq_s8_u16 ( uint16x8_t  __p0)
__ai int8x16_t vreinterpretq_s8_u32 ( uint32x4_t  __p0)
__ai int8x16_t vreinterpretq_s8_u64 ( uint64x2_t  __p0)
__ai int8x16_t vreinterpretq_s8_u8 ( uint8x16_t  __p0)
__ai uint16x8_t vreinterpretq_u16_f16 ( float16x8_t  __p0)
__ai uint16x8_t vreinterpretq_u16_f32 ( float32x4_t  __p0)
__ai uint16x8_t vreinterpretq_u16_p16 ( poly16x8_t  __p0)
__ai uint16x8_t vreinterpretq_u16_p8 ( poly8x16_t  __p0)
__ai uint16x8_t vreinterpretq_u16_s16 ( int16x8_t  __p0)
__ai uint16x8_t vreinterpretq_u16_s32 ( int32x4_t  __p0)
__ai uint16x8_t vreinterpretq_u16_s64 ( int64x2_t  __p0)
__ai uint16x8_t vreinterpretq_u16_s8 ( int8x16_t  __p0)
__ai uint16x8_t vreinterpretq_u16_u32 ( uint32x4_t  __p0)
__ai uint16x8_t vreinterpretq_u16_u64 ( uint64x2_t  __p0)
__ai uint16x8_t vreinterpretq_u16_u8 ( uint8x16_t  __p0)
__ai uint32x4_t vreinterpretq_u32_f16 ( float16x8_t  __p0)
__ai uint32x4_t vreinterpretq_u32_f32 ( float32x4_t  __p0)
__ai uint32x4_t vreinterpretq_u32_p16 ( poly16x8_t  __p0)
__ai uint32x4_t vreinterpretq_u32_p8 ( poly8x16_t  __p0)
__ai uint32x4_t vreinterpretq_u32_s16 ( int16x8_t  __p0)
__ai uint32x4_t vreinterpretq_u32_s32 ( int32x4_t  __p0)
__ai uint32x4_t vreinterpretq_u32_s64 ( int64x2_t  __p0)
__ai uint32x4_t vreinterpretq_u32_s8 ( int8x16_t  __p0)
__ai uint32x4_t vreinterpretq_u32_u16 ( uint16x8_t  __p0)
__ai uint32x4_t vreinterpretq_u32_u64 ( uint64x2_t  __p0)
__ai uint32x4_t vreinterpretq_u32_u8 ( uint8x16_t  __p0)
__ai uint64x2_t vreinterpretq_u64_f16 ( float16x8_t  __p0)
__ai uint64x2_t vreinterpretq_u64_f32 ( float32x4_t  __p0)
__ai uint64x2_t vreinterpretq_u64_p16 ( poly16x8_t  __p0)
__ai uint64x2_t vreinterpretq_u64_p8 ( poly8x16_t  __p0)
__ai uint64x2_t vreinterpretq_u64_s16 ( int16x8_t  __p0)
__ai uint64x2_t vreinterpretq_u64_s32 ( int32x4_t  __p0)
__ai uint64x2_t vreinterpretq_u64_s64 ( int64x2_t  __p0)
__ai uint64x2_t vreinterpretq_u64_s8 ( int8x16_t  __p0)
__ai uint64x2_t vreinterpretq_u64_u16 ( uint16x8_t  __p0)
__ai uint64x2_t vreinterpretq_u64_u32 ( uint32x4_t  __p0)
__ai uint64x2_t vreinterpretq_u64_u8 ( uint8x16_t  __p0)
__ai uint8x16_t vreinterpretq_u8_f16 ( float16x8_t  __p0)
__ai uint8x16_t vreinterpretq_u8_f32 ( float32x4_t  __p0)
__ai uint8x16_t vreinterpretq_u8_p16 ( poly16x8_t  __p0)
__ai uint8x16_t vreinterpretq_u8_p8 ( poly8x16_t  __p0)
__ai uint8x16_t vreinterpretq_u8_s16 ( int16x8_t  __p0)
__ai uint8x16_t vreinterpretq_u8_s32 ( int32x4_t  __p0)
__ai uint8x16_t vreinterpretq_u8_s64 ( int64x2_t  __p0)
__ai uint8x16_t vreinterpretq_u8_s8 ( int8x16_t  __p0)
__ai uint8x16_t vreinterpretq_u8_u16 ( uint16x8_t  __p0)
__ai uint8x16_t vreinterpretq_u8_u32 ( uint32x4_t  __p0)
__ai uint8x16_t vreinterpretq_u8_u64 ( uint64x2_t  __p0)
__ai poly8x8_t vrev16_p8 ( poly8x8_t  __p0)
__ai int8x8_t vrev16_s8 ( int8x8_t  __p0)
__ai uint8x8_t vrev16_u8 ( uint8x8_t  __p0)
__ai poly8x16_t vrev16q_p8 ( poly8x16_t  __p0)
__ai int8x16_t vrev16q_s8 ( int8x16_t  __p0)
__ai uint8x16_t vrev16q_u8 ( uint8x16_t  __p0)
__ai poly16x4_t vrev32_p16 ( poly16x4_t  __p0)
__ai poly8x8_t vrev32_p8 ( poly8x8_t  __p0)
__ai int16x4_t vrev32_s16 ( int16x4_t  __p0)
__ai int8x8_t vrev32_s8 ( int8x8_t  __p0)
__ai uint16x4_t vrev32_u16 ( uint16x4_t  __p0)
__ai uint8x8_t vrev32_u8 ( uint8x8_t  __p0)
__ai poly16x8_t vrev32q_p16 ( poly16x8_t  __p0)
__ai poly8x16_t vrev32q_p8 ( poly8x16_t  __p0)
__ai int16x8_t vrev32q_s16 ( int16x8_t  __p0)
__ai int8x16_t vrev32q_s8 ( int8x16_t  __p0)
__ai uint16x8_t vrev32q_u16 ( uint16x8_t  __p0)
__ai uint8x16_t vrev32q_u8 ( uint8x16_t  __p0)
__ai float32x2_t vrev64_f32 ( float32x2_t  __p0)
__ai poly16x4_t vrev64_p16 ( poly16x4_t  __p0)
__ai poly8x8_t vrev64_p8 ( poly8x8_t  __p0)
__ai int16x4_t vrev64_s16 ( int16x4_t  __p0)
__ai int32x2_t vrev64_s32 ( int32x2_t  __p0)
__ai int8x8_t vrev64_s8 ( int8x8_t  __p0)
__ai uint16x4_t vrev64_u16 ( uint16x4_t  __p0)
__ai uint32x2_t vrev64_u32 ( uint32x2_t  __p0)
__ai uint8x8_t vrev64_u8 ( uint8x8_t  __p0)
__ai float32x4_t vrev64q_f32 ( float32x4_t  __p0)
__ai poly16x8_t vrev64q_p16 ( poly16x8_t  __p0)
__ai poly8x16_t vrev64q_p8 ( poly8x16_t  __p0)
__ai int16x8_t vrev64q_s16 ( int16x8_t  __p0)
__ai int32x4_t vrev64q_s32 ( int32x4_t  __p0)
__ai int8x16_t vrev64q_s8 ( int8x16_t  __p0)
__ai uint16x8_t vrev64q_u16 ( uint16x8_t  __p0)
__ai uint32x4_t vrev64q_u32 ( uint32x4_t  __p0)
__ai uint8x16_t vrev64q_u8 ( uint8x16_t  __p0)
__ai int16x4_t vrhadd_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vrhadd_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8_t vrhadd_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vrhadd_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vrhadd_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vrhadd_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int16x8_t vrhaddq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vrhaddq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int8x16_t vrhaddq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vrhaddq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vrhaddq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16_t vrhaddq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai int16x4_t vrshl_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vrshl_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int64x1_t vrshl_s64 ( int64x1_t  __p0,
int64x1_t  __p1 
)
__ai int8x8_t vrshl_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vrshl_u16 ( uint16x4_t  __p0,
int16x4_t  __p1 
)
__ai uint32x2_t vrshl_u32 ( uint32x2_t  __p0,
int32x2_t  __p1 
)
__ai uint64x1_t vrshl_u64 ( uint64x1_t  __p0,
int64x1_t  __p1 
)
__ai uint8x8_t vrshl_u8 ( uint8x8_t  __p0,
int8x8_t  __p1 
)
__ai int16x8_t vrshlq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vrshlq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int64x2_t vrshlq_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai int8x16_t vrshlq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vrshlq_u16 ( uint16x8_t  __p0,
int16x8_t  __p1 
)
__ai uint32x4_t vrshlq_u32 ( uint32x4_t  __p0,
int32x4_t  __p1 
)
__ai uint64x2_t vrshlq_u64 ( uint64x2_t  __p0,
int64x2_t  __p1 
)
__ai uint8x16_t vrshlq_u8 ( uint8x16_t  __p0,
int8x16_t  __p1 
)
__ai float32x2_t vrsqrte_f32 ( float32x2_t  __p0)
__ai uint32x2_t vrsqrte_u32 ( uint32x2_t  __p0)
__ai float32x4_t vrsqrteq_f32 ( float32x4_t  __p0)
__ai uint32x4_t vrsqrteq_u32 ( uint32x4_t  __p0)
__ai float32x2_t vrsqrts_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai float32x4_t vrsqrtsq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai int8x8_t vrsubhn_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int16x4_t vrsubhn_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int32x2_t vrsubhn_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai uint8x8_t vrsubhn_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint16x4_t vrsubhn_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint32x2_t vrsubhn_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai int16x4_t vshl_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vshl_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int64x1_t vshl_s64 ( int64x1_t  __p0,
int64x1_t  __p1 
)
__ai int8x8_t vshl_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vshl_u16 ( uint16x4_t  __p0,
int16x4_t  __p1 
)
__ai uint32x2_t vshl_u32 ( uint32x2_t  __p0,
int32x2_t  __p1 
)
__ai uint64x1_t vshl_u64 ( uint64x1_t  __p0,
int64x1_t  __p1 
)
__ai uint8x8_t vshl_u8 ( uint8x8_t  __p0,
int8x8_t  __p1 
)
__ai int16x8_t vshlq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vshlq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int64x2_t vshlq_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai int8x16_t vshlq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vshlq_u16 ( uint16x8_t  __p0,
int16x8_t  __p1 
)
__ai uint32x4_t vshlq_u32 ( uint32x4_t  __p0,
int32x4_t  __p1 
)
__ai uint64x2_t vshlq_u64 ( uint64x2_t  __p0,
int64x2_t  __p1 
)
__ai uint8x16_t vshlq_u8 ( uint8x16_t  __p0,
int8x16_t  __p1 
)
__ai float32x2_t vsub_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai int16x4_t vsub_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2_t vsub_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int64x1_t vsub_s64 ( int64x1_t  __p0,
int64x1_t  __p1 
)
__ai int8x8_t vsub_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vsub_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vsub_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint64x1_t vsub_u64 ( uint64x1_t  __p0,
uint64x1_t  __p1 
)
__ai uint8x8_t vsub_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int8x8_t vsubhn_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int16x4_t vsubhn_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int32x2_t vsubhn_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai uint8x8_t vsubhn_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint16x4_t vsubhn_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint32x2_t vsubhn_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai int32x4_t vsubl_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int64x2_t vsubl_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int16x8_t vsubl_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint32x4_t vsubl_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint64x2_t vsubl_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint16x8_t vsubl_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai float32x4_t vsubq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai int16x8_t vsubq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4_t vsubq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int64x2_t vsubq_s64 ( int64x2_t  __p0,
int64x2_t  __p1 
)
__ai int8x16_t vsubq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vsubq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vsubq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint64x2_t vsubq_u64 ( uint64x2_t  __p0,
uint64x2_t  __p1 
)
__ai uint8x16_t vsubq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai int32x4_t vsubw_s16 ( int32x4_t  __p0,
int16x4_t  __p1 
)
__ai int64x2_t vsubw_s32 ( int64x2_t  __p0,
int32x2_t  __p1 
)
__ai int16x8_t vsubw_s8 ( int16x8_t  __p0,
int8x8_t  __p1 
)
__ai uint32x4_t vsubw_u16 ( uint32x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint64x2_t vsubw_u32 ( uint64x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint16x8_t vsubw_u8 ( uint16x8_t  __p0,
uint8x8_t  __p1 
)
__ai poly8x8_t vtbl1_p8 ( poly8x8_t  __p0,
uint8x8_t  __p1 
)
__ai int8x8_t vtbl1_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint8x8_t vtbl1_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai poly8x8_t vtbl2_p8 ( poly8x8x2_t  __p0,
uint8x8_t  __p1 
)
__ai int8x8_t vtbl2_s8 ( int8x8x2_t  __p0,
int8x8_t  __p1 
)
__ai uint8x8_t vtbl2_u8 ( uint8x8x2_t  __p0,
uint8x8_t  __p1 
)
__ai poly8x8_t vtbl3_p8 ( poly8x8x3_t  __p0,
uint8x8_t  __p1 
)
__ai int8x8_t vtbl3_s8 ( int8x8x3_t  __p0,
int8x8_t  __p1 
)
__ai uint8x8_t vtbl3_u8 ( uint8x8x3_t  __p0,
uint8x8_t  __p1 
)
__ai poly8x8_t vtbl4_p8 ( poly8x8x4_t  __p0,
uint8x8_t  __p1 
)
__ai int8x8_t vtbl4_s8 ( int8x8x4_t  __p0,
int8x8_t  __p1 
)
__ai uint8x8_t vtbl4_u8 ( uint8x8x4_t  __p0,
uint8x8_t  __p1 
)
__ai poly8x8_t vtbx1_p8 ( poly8x8_t  __p0,
poly8x8_t  __p1,
uint8x8_t  __p2 
)
__ai int8x8_t vtbx1_s8 ( int8x8_t  __p0,
int8x8_t  __p1,
int8x8_t  __p2 
)
__ai uint8x8_t vtbx1_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1,
uint8x8_t  __p2 
)
__ai poly8x8_t vtbx2_p8 ( poly8x8_t  __p0,
poly8x8x2_t  __p1,
uint8x8_t  __p2 
)
__ai int8x8_t vtbx2_s8 ( int8x8_t  __p0,
int8x8x2_t  __p1,
int8x8_t  __p2 
)
__ai uint8x8_t vtbx2_u8 ( uint8x8_t  __p0,
uint8x8x2_t  __p1,
uint8x8_t  __p2 
)
__ai poly8x8_t vtbx3_p8 ( poly8x8_t  __p0,
poly8x8x3_t  __p1,
uint8x8_t  __p2 
)
__ai int8x8_t vtbx3_s8 ( int8x8_t  __p0,
int8x8x3_t  __p1,
int8x8_t  __p2 
)
__ai uint8x8_t vtbx3_u8 ( uint8x8_t  __p0,
uint8x8x3_t  __p1,
uint8x8_t  __p2 
)
__ai poly8x8_t vtbx4_p8 ( poly8x8_t  __p0,
poly8x8x4_t  __p1,
uint8x8_t  __p2 
)
__ai int8x8_t vtbx4_s8 ( int8x8_t  __p0,
int8x8x4_t  __p1,
int8x8_t  __p2 
)
__ai uint8x8_t vtbx4_u8 ( uint8x8_t  __p0,
uint8x8x4_t  __p1,
uint8x8_t  __p2 
)
__ai float32x2x2_t vtrn_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai poly16x4x2_t vtrn_p16 ( poly16x4_t  __p0,
poly16x4_t  __p1 
)
__ai poly8x8x2_t vtrn_p8 ( poly8x8_t  __p0,
poly8x8_t  __p1 
)
__ai int16x4x2_t vtrn_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2x2_t vtrn_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8x2_t vtrn_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4x2_t vtrn_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2x2_t vtrn_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8x2_t vtrn_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai float32x4x2_t vtrnq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai poly16x8x2_t vtrnq_p16 ( poly16x8_t  __p0,
poly16x8_t  __p1 
)
__ai poly8x16x2_t vtrnq_p8 ( poly8x16_t  __p0,
poly8x16_t  __p1 
)
__ai int16x8x2_t vtrnq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4x2_t vtrnq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int8x16x2_t vtrnq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8x2_t vtrnq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4x2_t vtrnq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16x2_t vtrnq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai uint16x4_t vtst_p16 ( poly16x4_t  __p0,
poly16x4_t  __p1 
)
__ai uint8x8_t vtst_p8 ( poly8x8_t  __p0,
poly8x8_t  __p1 
)
__ai uint16x4_t vtst_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai uint32x2_t vtst_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai uint8x8_t vtst_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4_t vtst_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2_t vtst_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8_t vtst_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai uint16x8_t vtstq_p16 ( poly16x8_t  __p0,
poly16x8_t  __p1 
)
__ai uint8x16_t vtstq_p8 ( poly8x16_t  __p0,
poly8x16_t  __p1 
)
__ai uint16x8_t vtstq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai uint32x4_t vtstq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai uint8x16_t vtstq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8_t vtstq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4_t vtstq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16_t vtstq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai float32x2x2_t vuzp_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai poly16x4x2_t vuzp_p16 ( poly16x4_t  __p0,
poly16x4_t  __p1 
)
__ai poly8x8x2_t vuzp_p8 ( poly8x8_t  __p0,
poly8x8_t  __p1 
)
__ai int16x4x2_t vuzp_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2x2_t vuzp_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8x2_t vuzp_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4x2_t vuzp_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2x2_t vuzp_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8x2_t vuzp_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai float32x4x2_t vuzpq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai poly16x8x2_t vuzpq_p16 ( poly16x8_t  __p0,
poly16x8_t  __p1 
)
__ai poly8x16x2_t vuzpq_p8 ( poly8x16_t  __p0,
poly8x16_t  __p1 
)
__ai int16x8x2_t vuzpq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4x2_t vuzpq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int8x16x2_t vuzpq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8x2_t vuzpq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4x2_t vuzpq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16x2_t vuzpq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)
__ai float32x2x2_t vzip_f32 ( float32x2_t  __p0,
float32x2_t  __p1 
)
__ai poly16x4x2_t vzip_p16 ( poly16x4_t  __p0,
poly16x4_t  __p1 
)
__ai poly8x8x2_t vzip_p8 ( poly8x8_t  __p0,
poly8x8_t  __p1 
)
__ai int16x4x2_t vzip_s16 ( int16x4_t  __p0,
int16x4_t  __p1 
)
__ai int32x2x2_t vzip_s32 ( int32x2_t  __p0,
int32x2_t  __p1 
)
__ai int8x8x2_t vzip_s8 ( int8x8_t  __p0,
int8x8_t  __p1 
)
__ai uint16x4x2_t vzip_u16 ( uint16x4_t  __p0,
uint16x4_t  __p1 
)
__ai uint32x2x2_t vzip_u32 ( uint32x2_t  __p0,
uint32x2_t  __p1 
)
__ai uint8x8x2_t vzip_u8 ( uint8x8_t  __p0,
uint8x8_t  __p1 
)
__ai float32x4x2_t vzipq_f32 ( float32x4_t  __p0,
float32x4_t  __p1 
)
__ai poly16x8x2_t vzipq_p16 ( poly16x8_t  __p0,
poly16x8_t  __p1 
)
__ai poly8x16x2_t vzipq_p8 ( poly8x16_t  __p0,
poly8x16_t  __p1 
)
__ai int16x8x2_t vzipq_s16 ( int16x8_t  __p0,
int16x8_t  __p1 
)
__ai int32x4x2_t vzipq_s32 ( int32x4_t  __p0,
int32x4_t  __p1 
)
__ai int8x16x2_t vzipq_s8 ( int8x16_t  __p0,
int8x16_t  __p1 
)
__ai uint16x8x2_t vzipq_u16 ( uint16x8_t  __p0,
uint16x8_t  __p1 
)
__ai uint32x4x2_t vzipq_u32 ( uint32x4_t  __p0,
uint32x4_t  __p1 
)
__ai uint8x16x2_t vzipq_u8 ( uint8x16_t  __p0,
uint8x16_t  __p1 
)