diff --git a/crates/core_arch/src/aarch64/neon/mod.rs b/crates/core_arch/src/aarch64/neon/mod.rs index b172b57f32..1a0807a718 100644 --- a/crates/core_arch/src/aarch64/neon/mod.rs +++ b/crates/core_arch/src/aarch64/neon/mod.rs @@ -425,7 +425,7 @@ pub fn vmovq_n_f64(value: f64) -> float64x2_t { #[cfg_attr(test, assert_instr(nop))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vget_high_f64(a: float64x2_t) -> float64x1_t { - unsafe { float64x1_t([simd_extract!(a, 1)]) } + unsafe { float64x1_t::from_array([simd_extract!(a, 1)]) } } /// Duplicate vector element to vector or scalar @@ -443,7 +443,7 @@ pub fn vget_high_p64(a: poly64x2_t) -> poly64x1_t { #[cfg_attr(test, assert_instr(nop))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vget_low_f64(a: float64x2_t) -> float64x1_t { - unsafe { float64x1_t([simd_extract!(a, 0)]) } + unsafe { float64x1_t::from_array([simd_extract!(a, 0)]) } } /// Duplicate vector element to vector or scalar diff --git a/crates/core_arch/src/arm_shared/neon/generated.rs b/crates/core_arch/src/arm_shared/neon/generated.rs index 32531c7da1..d31ca1183a 100644 --- a/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/crates/core_arch/src/arm_shared/neon/generated.rs @@ -12849,7 +12849,7 @@ pub fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vget_high_s64(a: int64x2_t) -> int64x1_t { - unsafe { int64x1_t([simd_extract!(a, 1)]) } + unsafe { int64x1_t::from_array([simd_extract!(a, 1)]) } } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u64)"] @@ -12870,7 +12870,7 @@ pub fn vget_high_s64(a: int64x2_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vget_high_u64(a: uint64x2_t) -> uint64x1_t { - unsafe { uint64x1_t([simd_extract!(a, 1)]) } + unsafe { 
uint64x1_t::from_array([simd_extract!(a, 1)]) } } #[doc = "Duplicate vector element to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_f16)"] @@ -13528,7 +13528,7 @@ pub fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vget_low_s64(a: int64x2_t) -> int64x1_t { - unsafe { int64x1_t([simd_extract!(a, 0)]) } + unsafe { int64x1_t::from_array([simd_extract!(a, 0)]) } } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u64)"] @@ -13545,7 +13545,7 @@ pub fn vget_low_s64(a: int64x2_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { - unsafe { uint64x1_t([simd_extract!(a, 0)]) } + unsafe { uint64x1_t::from_array([simd_extract!(a, 0)]) } } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)"] @@ -39338,7 +39338,7 @@ pub fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")] fn _vqrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } - unsafe { _vqrshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } + unsafe { _vqrshrn_n_s16(a, const { int16x8_t::from_array([-N as i16; 8]) }) } } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] @@ -39354,7 +39354,7 @@ pub fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")] fn _vqrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } - unsafe { _vqrshrn_n_s32(a, const { int32x4_t([-N; 4]) }) } + unsafe { _vqrshrn_n_s32(a, const { int32x4_t::from_array([-N; 4]) }) 
} } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] @@ -39370,7 +39370,7 @@ pub fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")] fn _vqrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } - unsafe { _vqrshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } + unsafe { _vqrshrn_n_s64(a, const { int64x2_t::from_array([-N as i64; 2]) }) } } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] @@ -39447,7 +39447,7 @@ pub fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { _vqrshrn_n_u16( a, const { - uint16x8_t([ + uint16x8_t::from_array([ -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, ]) @@ -39472,7 +39472,7 @@ pub fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { unsafe { _vqrshrn_n_u32( a, - const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }, + const { uint32x4_t::from_array([-N as u32, -N as u32, -N as u32, -N as u32]) }, ) } } @@ -39490,7 +39490,7 @@ pub fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] fn _vqrshrn_n_u64(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t; } - unsafe { _vqrshrn_n_u64(a, const { uint64x2_t([-N as u64, -N as u64]) }) } + unsafe { _vqrshrn_n_u64(a, const { uint64x2_t::from_array([-N as u64, -N as u64]) }) } } #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] @@ -39563,7 +39563,7 @@ pub fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> uint8x8_t; } - 
unsafe { _vqrshrun_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } + unsafe { _vqrshrun_n_s16(a, const { int16x8_t::from_array([-N as i16; 8]) }) } } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] @@ -39579,7 +39579,7 @@ pub fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> uint16x4_t; } - unsafe { _vqrshrun_n_s32(a, const { int32x4_t([-N; 4]) }) } + unsafe { _vqrshrun_n_s32(a, const { int32x4_t::from_array([-N; 4]) }) } } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] @@ -39595,7 +39595,7 @@ pub fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> uint32x2_t; } - unsafe { _vqrshrun_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } + unsafe { _vqrshrun_n_s64(a, const { int64x2_t::from_array([-N as i64; 2]) }) } } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] @@ -40500,7 +40500,7 @@ pub fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")] fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> uint8x8_t; } - unsafe { _vqshlu_n_s8(a, const { int8x8_t([N as i8; 8]) }) } + unsafe { _vqshlu_n_s8(a, const { int8x8_t::from_array([N as i8; 8]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] @@ -40516,7 +40516,7 @@ pub fn vqshluq_n_s8(a: int8x16_t) -> 
uint8x16_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")] fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> uint8x16_t; } - unsafe { _vqshluq_n_s8(a, const { int8x16_t([N as i8; 16]) }) } + unsafe { _vqshluq_n_s8(a, const { int8x16_t::from_array([N as i8; 16]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] @@ -40532,7 +40532,7 @@ pub fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")] fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> uint16x4_t; } - unsafe { _vqshlu_n_s16(a, const { int16x4_t([N as i16; 4]) }) } + unsafe { _vqshlu_n_s16(a, const { int16x4_t::from_array([N as i16; 4]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] @@ -40548,7 +40548,7 @@ pub fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")] fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> uint16x8_t; } - unsafe { _vqshluq_n_s16(a, const { int16x8_t([N as i16; 8]) }) } + unsafe { _vqshluq_n_s16(a, const { int16x8_t::from_array([N as i16; 8]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] @@ -40564,7 +40564,7 @@ pub fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")] fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> uint32x2_t; } - unsafe { _vqshlu_n_s32(a, const { int32x2_t([N; 2]) }) } + unsafe { _vqshlu_n_s32(a, const { int32x2_t::from_array([N; 2]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] @@ -40580,7 +40580,7 @@ pub fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")] fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> uint32x4_t; } - unsafe { _vqshluq_n_s32(a, const { int32x4_t([N; 4]) }) } + unsafe { _vqshluq_n_s32(a, const { int32x4_t::from_array([N; 4]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] @@ -40596,7 +40596,7 @@ pub fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")] fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> uint64x1_t; } - unsafe { _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }) } + unsafe { _vqshlu_n_s64(a, const { int64x1_t::from_array([N as i64]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] @@ -40612,7 +40612,7 @@ pub fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")] fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> uint64x2_t; } - unsafe { _vqshluq_n_s64(a, const { int64x2_t([N as i64; 2]) }) } + unsafe { _vqshluq_n_s64(a, const { int64x2_t::from_array([N as i64; 2]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] @@ -40631,7 +40631,7 @@ pub fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { )] fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> uint8x8_t; } - unsafe { _vqshlu_n_s8(a, const { int8x8_t([N as i8; 8]) }) } + unsafe { _vqshlu_n_s8(a, const { int8x8_t::from_array([N as i8; 8]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] @@ -40650,7 +40650,7 @@ pub fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { )] fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> uint8x16_t; } - unsafe { _vqshluq_n_s8(a, const { int8x16_t([N as i8; 16]) }) } + unsafe { _vqshluq_n_s8(a, const { int8x16_t::from_array([N as i8; 16]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] @@ -40669,7 +40669,7 @@ pub fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { )] fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> uint16x4_t; } - unsafe { _vqshlu_n_s16(a, const { int16x4_t([N as i16; 4]) }) } + unsafe { _vqshlu_n_s16(a, const { int16x4_t::from_array([N as i16; 4]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] @@ -40688,7 +40688,7 @@ pub fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { )] fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> uint16x8_t; } - unsafe { _vqshluq_n_s16(a, const { int16x8_t([N as i16; 8]) }) } + unsafe { _vqshluq_n_s16(a, const { int16x8_t::from_array([N as i16; 8]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] @@ -40707,7 +40707,7 @@ pub fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { )] fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> uint32x2_t; } - unsafe { _vqshlu_n_s32(a, const { int32x2_t([N; 2]) }) } + unsafe { _vqshlu_n_s32(a, const { int32x2_t::from_array([N; 2]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] @@ -40726,7 +40726,7 @@ pub fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { )] fn _vqshluq_n_s32(a: int32x4_t, n: 
int32x4_t) -> uint32x4_t; } - unsafe { _vqshluq_n_s32(a, const { int32x4_t([N; 4]) }) } + unsafe { _vqshluq_n_s32(a, const { int32x4_t::from_array([N; 4]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] @@ -40745,7 +40745,7 @@ pub fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { )] fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> uint64x1_t; } - unsafe { _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }) } + unsafe { _vqshlu_n_s64(a, const { int64x1_t::from_array([N as i64]) }) } } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] @@ -40764,7 +40764,7 @@ pub fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { )] fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> uint64x2_t; } - unsafe { _vqshluq_n_s64(a, const { int64x2_t([N as i64; 2]) }) } + unsafe { _vqshluq_n_s64(a, const { int64x2_t::from_array([N as i64; 2]) }) } } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] @@ -40780,7 +40780,7 @@ pub fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } - unsafe { _vqshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } + unsafe { _vqshrn_n_s16(a, const { int16x8_t::from_array([-N as i16; 8]) }) } } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] @@ -40796,7 +40796,7 @@ pub fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } - unsafe { _vqshrn_n_s32(a, const { 
int32x4_t([-N; 4]) }) } + unsafe { _vqshrn_n_s32(a, const { int32x4_t::from_array([-N; 4]) }) } } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] @@ -40812,7 +40812,7 @@ pub fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } - unsafe { _vqshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } + unsafe { _vqshrn_n_s64(a, const { int64x2_t::from_array([-N as i64; 2]) }) } } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] @@ -40889,7 +40889,7 @@ pub fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { _vqshrn_n_u16( a, const { - uint16x8_t([ + uint16x8_t::from_array([ -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, ]) @@ -40914,7 +40914,7 @@ pub fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { unsafe { _vqshrn_n_u32( a, - const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }, + const { uint32x4_t::from_array([-N as u32, -N as u32, -N as u32, -N as u32]) }, ) } } @@ -40932,7 +40932,7 @@ pub fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] fn _vqshrn_n_u64(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t; } - unsafe { _vqshrn_n_u64(a, const { uint64x2_t([-N as u64, -N as u64]) }) } + unsafe { _vqshrn_n_u64(a, const { uint64x2_t::from_array([-N as u64, -N as u64]) }) } } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] @@ -41005,7 +41005,7 @@ pub fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] fn _vqshrun_n_s16(a: 
int16x8_t, n: int16x8_t) -> uint8x8_t; } - unsafe { _vqshrun_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } + unsafe { _vqshrun_n_s16(a, const { int16x8_t::from_array([-N as i16; 8]) }) } } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] @@ -41021,7 +41021,7 @@ pub fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> uint16x4_t; } - unsafe { _vqshrun_n_s32(a, const { int32x4_t([-N; 4]) }) } + unsafe { _vqshrun_n_s32(a, const { int32x4_t::from_array([-N; 4]) }) } } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] @@ -41037,7 +41037,7 @@ pub fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> uint32x2_t; } - unsafe { _vqshrun_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } + unsafe { _vqshrun_n_s64(a, const { int64x2_t::from_array([-N as i64; 2]) }) } } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] @@ -59195,7 +59195,7 @@ pub fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")] fn _vrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } - unsafe { _vrshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } + unsafe { _vrshrn_n_s16(a, const { int16x8_t::from_array([-N as i16; 8]) }) } } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] @@ -59211,7 +59211,7 @@ pub fn vrshrn_n_s32(a: int32x4_t) 
-> int16x4_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")] fn _vrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; } - unsafe { _vrshrn_n_s32(a, const { int32x4_t([-N; 4]) }) } + unsafe { _vrshrn_n_s32(a, const { int32x4_t::from_array([-N; 4]) }) } } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] @@ -59227,7 +59227,7 @@ pub fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")] fn _vrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; } - unsafe { _vrshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } + unsafe { _vrshrn_n_s64(a, const { int64x2_t::from_array([-N as i64; 2]) }) } } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] diff --git a/crates/core_arch/src/macros.rs b/crates/core_arch/src/macros.rs index e00b433536..dcc35ad713 100644 --- a/crates/core_arch/src/macros.rs +++ b/crates/core_arch/src/macros.rs @@ -103,16 +103,28 @@ macro_rules! types { unsafe { simd_shuffle!(one, one, [0; $len]) } } + /// Constructs a vector from an array of the same elements and length + /// + /// For now you don't *have* to use this to construct one of these + /// (depending on the visibility you put on the field) but it's encouraged + /// in case direct construction also gets banned. + #[inline] + $v const fn from_array(array: [$elem_type; $len]) -> Self { + // Projecting into SIMD is banned, but this is technically an + // `Rvalue::Aggregate`, which is not a projection. + $name { do_not_field_project: array } + } + /// Returns an array reference containing the entire SIMD vector. 
+ #[inline] $v const fn as_array(&self) -> &[$elem_type; $len] { // SAFETY: this type is just an overaligned `[T; N]` with // potential padding at the end, so pointer casting to a // `&[T; N]` is safe. // - // NOTE: This deliberately doesn't just use `&self.0` because it may soon be banned + // NOTE: This deliberately doesn't just use `&self.0` because it's banned // see https://github.com/rust-lang/compiler-team/issues/838 unsafe { &*(self as *const Self as *const [$elem_type; $len]) } - } /// Returns a mutable array reference containing the entire SIMD vector. diff --git a/crates/core_arch/src/x86/avx.rs b/crates/core_arch/src/x86/avx.rs index 24e0cf6ba1..c87e8c958b 100644 --- a/crates/core_arch/src/x86/avx.rs +++ b/crates/core_arch/src/x86/avx.rs @@ -2391,7 +2391,7 @@ pub fn _mm256_set_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i { // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub fn _mm256_setr_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d { - __m256d([a, b, c, d]) + __m256d::from_array([a, b, c, d]) } /// Sets packed single-precision (32-bit) floating-point elements in returned @@ -2403,7 +2403,7 @@ pub fn _mm256_setr_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d { // This intrinsic has no corresponding instruction.
#[stable(feature = "simd_x86", since = "1.27.0")] pub fn _mm256_setr_ps(a: f32, b: f32, c: f32, d: f32, e: f32, f: f32, g: f32, h: f32) -> __m256 { - __m256([a, b, c, d, e, f, g, h]) + __m256::from_array([a, b, c, d, e, f, g, h]) } /// Sets packed 8-bit integers in returned vector with the supplied values in diff --git a/crates/core_arch/src/x86/avx512bf16.rs b/crates/core_arch/src/x86/avx512bf16.rs index 85afd91fba..e380503d80 100644 --- a/crates/core_arch/src/x86/avx512bf16.rs +++ b/crates/core_arch/src/x86/avx512bf16.rs @@ -1834,7 +1834,7 @@ mod tests { #[simd_test(enable = "avx512bf16")] unsafe fn test_mm512_cvtpbh_ps() { - let a = __m256bh([ + let a = __m256bh::from_array([ BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, ]); @@ -1847,7 +1847,7 @@ mod tests { #[simd_test(enable = "avx512bf16")] unsafe fn test_mm512_mask_cvtpbh_ps() { - let a = __m256bh([ + let a = __m256bh::from_array([ BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, ]); @@ -1864,7 +1864,7 @@ mod tests { #[simd_test(enable = "avx512bf16")] unsafe fn test_mm512_maskz_cvtpbh_ps() { - let a = __m256bh([ + let a = __m256bh::from_array([ BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, ]); @@ -1878,7 +1878,7 @@ mod tests { #[simd_test(enable = "avx512bf16,avx512vl")] unsafe fn test_mm256_cvtpbh_ps() { - let a = __m128bh([ + let a = __m128bh::from_array([ BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, ]); let r = _mm256_cvtpbh_ps(a); @@ -1888,7 +1888,7 @@ mod tests { #[simd_test(enable = "avx512bf16,avx512vl")] unsafe fn test_mm256_mask_cvtpbh_ps() { - let a = __m128bh([ + let a = 
__m128bh::from_array([ BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, ]); let src = _mm256_setr_ps(9., 10., 11., 12., 13., 14., 15., 16.); @@ -1900,7 +1900,7 @@ mod tests { #[simd_test(enable = "avx512bf16,avx512vl")] unsafe fn test_mm256_maskz_cvtpbh_ps() { - let a = __m128bh([ + let a = __m128bh::from_array([ BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, ]); let k = 0b1010_1010; @@ -1911,7 +1911,7 @@ mod tests { #[simd_test(enable = "avx512bf16,avx512vl")] unsafe fn test_mm_cvtpbh_ps() { - let a = __m128bh([BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, 0, 0, 0, 0]); + let a = __m128bh::from_array([BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, 0, 0, 0, 0]); let r = _mm_cvtpbh_ps(a); let e = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); assert_eq_m128(r, e); @@ -1919,7 +1919,7 @@ mod tests { #[simd_test(enable = "avx512bf16,avx512vl")] unsafe fn test_mm_mask_cvtpbh_ps() { - let a = __m128bh([BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, 0, 0, 0, 0]); + let a = __m128bh::from_array([BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, 0, 0, 0, 0]); let src = _mm_setr_ps(9., 10., 11., 12.); let k = 0b1010; let r = _mm_mask_cvtpbh_ps(src, k, a); @@ -1929,7 +1929,7 @@ mod tests { #[simd_test(enable = "avx512bf16,avx512vl")] unsafe fn test_mm_maskz_cvtpbh_ps() { - let a = __m128bh([BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, 0, 0, 0, 0]); + let a = __m128bh::from_array([BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, 0, 0, 0, 0]); let k = 0b1010; let r = _mm_maskz_cvtpbh_ps(k, a); let e = _mm_setr_ps(0., 2., 0., 4.); @@ -1953,7 +1953,7 @@ mod tests { #[simd_test(enable = "avx512bf16,avx512vl")] unsafe fn test_mm_mask_cvtneps_pbh() { let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - let src = __m128bh([5, 6, 7, 8, !0, !0, !0, !0]); + let src = __m128bh::from_array([5, 6, 7, 8, !0, !0, !0, !0]); let k = 0b1010; let r: u16x4 = transmute_copy(&_mm_mask_cvtneps_pbh(src, k, a)); let e = u16x4::new(5, BF16_TWO, 7, BF16_FOUR); diff --git 
a/crates/core_arch/src/x86/avx512fp16.rs b/crates/core_arch/src/x86/avx512fp16.rs index 8c914803c6..561625ec96 100644 --- a/crates/core_arch/src/x86/avx512fp16.rs +++ b/crates/core_arch/src/x86/avx512fp16.rs @@ -19,7 +19,7 @@ pub fn _mm_set_ph( e1: f16, e0: f16, ) -> __m128h { - __m128h([e0, e1, e2, e3, e4, e5, e6, e7]) + __m128h::from_array([e0, e1, e2, e3, e4, e5, e6, e7]) } /// Set packed half-precision (16-bit) floating-point elements in dst with the supplied values. @@ -46,7 +46,7 @@ pub fn _mm256_set_ph( e1: f16, e0: f16, ) -> __m256h { - __m256h([ + __m256h::from_array([ e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, ]) } @@ -91,7 +91,7 @@ pub fn _mm512_set_ph( e1: f16, e0: f16, ) -> __m512h { - __m512h([ + __m512h::from_array([ e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, ]) @@ -105,7 +105,7 @@ pub fn _mm512_set_ph( #[target_feature(enable = "avx512fp16")] #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub fn _mm_set_sh(a: f16) -> __m128h { - __m128h([a, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) + __m128h::from_array([a, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) } /// Broadcast the half-precision (16-bit) floating-point value a to all elements of dst. @@ -154,7 +154,7 @@ pub fn _mm_setr_ph( e6: f16, e7: f16, ) -> __m128h { - __m128h([e0, e1, e2, e3, e4, e5, e6, e7]) + __m128h::from_array([e0, e1, e2, e3, e4, e5, e6, e7]) } /// Set packed half-precision (16-bit) floating-point elements in dst with the supplied values in reverse order. 
@@ -181,7 +181,7 @@ pub fn _mm256_setr_ph( e14: f16, e15: f16, ) -> __m256h { - __m256h([ + __m256h::from_array([ e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, ]) } @@ -226,7 +226,7 @@ pub fn _mm512_setr_ph( e30: f16, e31: f16, ) -> __m512h { - __m512h([ + __m512h::from_array([ e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, ]) diff --git a/crates/core_arch/src/x86/avxneconvert.rs b/crates/core_arch/src/x86/avxneconvert.rs index b92ec823ec..0172266eb2 100644 --- a/crates/core_arch/src/x86/avxneconvert.rs +++ b/crates/core_arch/src/x86/avxneconvert.rs @@ -275,7 +275,7 @@ mod tests { #[simd_test(enable = "avxneconvert")] unsafe fn test_mm_cvtneebf16_ps() { - let a = __m128bh([ + let a = __m128bh::from_array([ BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, ]); let r = _mm_cvtneebf16_ps(addr_of!(a)); @@ -285,7 +285,7 @@ mod tests { #[simd_test(enable = "avxneconvert")] unsafe fn test_mm256_cvtneebf16_ps() { - let a = __m256bh([ + let a = __m256bh::from_array([ BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, ]); @@ -296,7 +296,7 @@ mod tests { #[simd_test(enable = "avxneconvert")] unsafe fn test_mm_cvtneeph_ps() { - let a = __m128h([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]); + let a = __m128h::from_array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]); let r = _mm_cvtneeph_ps(addr_of!(a)); let e = _mm_setr_ps(1., 3., 5., 7.); assert_eq_m128(r, e); @@ -304,7 +304,7 @@ mod tests { #[simd_test(enable = "avxneconvert")] unsafe fn test_mm256_cvtneeph_ps() { - let a = __m256h([ + let a = __m256h::from_array([ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ]); let r = _mm256_cvtneeph_ps(addr_of!(a)); @@ -314,7 +314,7 @@ mod tests { #[simd_test(enable = 
"avxneconvert")] unsafe fn test_mm_cvtneobf16_ps() { - let a = __m128bh([ + let a = __m128bh::from_array([ BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, ]); let r = _mm_cvtneobf16_ps(addr_of!(a)); @@ -324,7 +324,7 @@ mod tests { #[simd_test(enable = "avxneconvert")] unsafe fn test_mm256_cvtneobf16_ps() { - let a = __m256bh([ + let a = __m256bh::from_array([ BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, BF16_ONE, BF16_TWO, BF16_THREE, BF16_FOUR, BF16_FIVE, BF16_SIX, BF16_SEVEN, BF16_EIGHT, ]); @@ -335,7 +335,7 @@ mod tests { #[simd_test(enable = "avxneconvert")] unsafe fn test_mm_cvtneoph_ps() { - let a = __m128h([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]); + let a = __m128h::from_array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]); let r = _mm_cvtneoph_ps(addr_of!(a)); let e = _mm_setr_ps(2., 4., 6., 8.); assert_eq_m128(r, e); @@ -343,7 +343,7 @@ mod tests { #[simd_test(enable = "avxneconvert")] unsafe fn test_mm256_cvtneoph_ps() { - let a = __m256h([ + let a = __m256h::from_array([ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ]); let r = _mm256_cvtneoph_ps(addr_of!(a)); diff --git a/crates/core_arch/src/x86/sse.rs b/crates/core_arch/src/x86/sse.rs index 1eca66adc2..4282c84d85 100644 --- a/crates/core_arch/src/x86/sse.rs +++ b/crates/core_arch/src/x86/sse.rs @@ -905,7 +905,7 @@ pub fn _mm_cvt_si2ss(a: __m128, b: i32) -> __m128 { #[cfg_attr(test, assert_instr(movss))] #[stable(feature = "simd_x86", since = "1.27.0")] pub fn _mm_set_ss(a: f32) -> __m128 { - __m128([a, 0.0, 0.0, 0.0]) + __m128::from_array([a, 0.0, 0.0, 0.0]) } /// Construct a `__m128` with all element set to `a`. 
@@ -916,7 +916,7 @@ pub fn _mm_set_ss(a: f32) -> __m128 { #[cfg_attr(test, assert_instr(shufps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub fn _mm_set1_ps(a: f32) -> __m128 { - __m128([a, a, a, a]) + __m128::from_array([a, a, a, a]) } /// Alias for [`_mm_set1_ps`](fn._mm_set1_ps.html) @@ -954,7 +954,7 @@ pub fn _mm_set_ps1(a: f32) -> __m128 { #[cfg_attr(test, assert_instr(unpcklps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub fn _mm_set_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 { - __m128([d, c, b, a]) + __m128::from_array([d, c, b, a]) } /// Construct a `__m128` from four floating point values lowest to highest. @@ -980,7 +980,7 @@ pub fn _mm_set_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 { )] #[stable(feature = "simd_x86", since = "1.27.0")] pub fn _mm_setr_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 { - __m128([a, b, c, d]) + __m128::from_array([a, b, c, d]) } /// Construct a `__m128` with all elements initialized to zero. @@ -1116,7 +1116,7 @@ pub fn _mm_movemask_ps(a: __m128) -> i32 { #[cfg_attr(test, assert_instr(movss))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_load_ss(p: *const f32) -> __m128 { - __m128([*p, 0.0, 0.0, 0.0]) + __m128::from_array([*p, 0.0, 0.0, 0.0]) } /// Construct a `__m128` by duplicating the value read from `p` into all @@ -1132,7 +1132,7 @@ pub unsafe fn _mm_load_ss(p: *const f32) -> __m128 { #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_load1_ps(p: *const f32) -> __m128 { let a = *p; - __m128([a, a, a, a]) + __m128::from_array([a, a, a, a]) } /// Alias for [`_mm_load1_ps`](fn._mm_load1_ps.html) diff --git a/crates/core_arch/src/x86/sse2.rs b/crates/core_arch/src/x86/sse2.rs index 1eaa89663b..7ccafaade1 100644 --- a/crates/core_arch/src/x86/sse2.rs +++ b/crates/core_arch/src/x86/sse2.rs @@ -2497,7 +2497,7 @@ pub fn _mm_set_pd1(a: f64) -> __m128d { #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] pub fn _mm_set_pd(a: f64, b: 
f64) -> __m128d { - __m128d([b, a]) + __m128d::from_array([b, a]) } /// Sets packed double-precision (64-bit) floating-point elements in the return diff --git a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index c96c6e2a0c..dbce740323 100644 --- a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -7830,9 +7830,9 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16; 8]) }'] - - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }'] - - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }'] + - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t::from_array([-N as i16; 8]) }'] + - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t::from_array([-N; 4]) }'] + - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t::from_array([-N as i64; 2]) }'] compose: - FnCall: [static_assert!, ["{type[2]}"]] - LLVMLink: @@ -7885,9 +7885,9 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16; 8]) }'] - - [int32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }'] - - [int64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }'] + - [int16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t::from_array([-N as i16; 8]) }'] + - [int32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t::from_array([-N; 4]) }'] + - [int64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t::from_array([-N as i64; 2]) }'] compose: - FnCall: [static_assert!, ["{type[2]}"]] - LLVMLink: @@ -8061,9 +8061,9 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16; 8]) }'] - - [int32x4_t, int16x4_t, 
'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }'] - - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }'] + - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t::from_array([-N as i16; 8]) }'] + - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t::from_array([-N; 4]) }'] + - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t::from_array([-N as i64; 2]) }'] compose: - FnCall: [static_assert!, ["{type[2]}"]] - LLVMLink: @@ -8116,9 +8116,9 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [uint16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { uint16x8_t([-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16]) }'] - - [uint32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }'] - - [uint64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { uint64x2_t([-N as u64, -N as u64]) }'] + - [uint16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { uint16x8_t::from_array([-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16]) }'] + - [uint32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { uint32x4_t::from_array([-N as u32, -N as u32, -N as u32, -N as u32]) }'] + - [uint64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { uint64x2_t::from_array([-N as u64, -N as u64]) }'] compose: - FnCall: [static_assert!, ["{type[2]}"]] - LLVMLink: @@ -8171,9 +8171,9 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16; 8]) }'] - - [int32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }'] - - [int64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }'] + - [int16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t::from_array([-N as i16; 8]) }'] + - [int32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t::from_array([-N; 4]) }'] + - [int64x2_t, uint32x2_t, 'N >= 1 
&& N <= 32', 'const { int64x2_t::from_array([-N as i64; 2]) }'] compose: - FnCall: [static_assert!, ["{type[2]}"]] - LLVMLink: @@ -8895,9 +8895,9 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16; 8]) }'] - - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }'] - - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }'] + - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t::from_array([-N as i16; 8]) }'] + - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t::from_array([-N; 4]) }'] + - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t::from_array([-N as i64; 2]) }'] compose: - FnCall: [static_assert!, ["{type[2]}"]] - LLVMLink: @@ -10557,9 +10557,9 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [uint16x8_t, uint8x8_t, '8', 'const { uint16x8_t([-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16]) }'] - - [uint32x4_t, uint16x4_t, '16', 'const { uint32x4_t([-N as u32, -N as u32, -N as u32, -N as u32]) }'] - - [uint64x2_t, uint32x2_t, '32', 'const { uint64x2_t([-N as u64, -N as u64]) }'] + - [uint16x8_t, uint8x8_t, '8', 'const { uint16x8_t::from_array([-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16]) }'] + - [uint32x4_t, uint16x4_t, '16', 'const { uint32x4_t::from_array([-N as u32, -N as u32, -N as u32, -N as u32]) }'] + - [uint64x2_t, uint32x2_t, '32', 'const { uint64x2_t::from_array([-N as u64, -N as u64]) }'] compose: - FnCall: [static_assert!, ['N >= 1 && N <= {type[2]}']] - LLVMLink: @@ -11413,14 +11413,14 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int8x8_t, uint8x8_t, '3', 'const { int8x8_t([N as i8; 8]) }'] - - [int16x4_t, uint16x4_t, '4', 'const { int16x4_t([N as i16; 4]) }'] - - [int32x2_t, uint32x2_t, '5', 'const { int32x2_t([N; 2]) }'] - - [int64x1_t, 
uint64x1_t, '6', 'const { int64x1_t([N as i64]) }'] - - [int8x16_t, uint8x16_t, '3', 'const { int8x16_t([N as i8; 16]) }'] - - [int16x8_t, uint16x8_t, '4', 'const { int16x8_t([N as i16; 8]) }'] - - [int32x4_t, uint32x4_t, '5', 'const { int32x4_t([N; 4]) }'] - - [int64x2_t, uint64x2_t, '6', 'const { int64x2_t([N as i64; 2]) }'] + - [int8x8_t, uint8x8_t, '3', 'const { int8x8_t::from_array([N as i8; 8]) }'] + - [int16x4_t, uint16x4_t, '4', 'const { int16x4_t::from_array([N as i16; 4]) }'] + - [int32x2_t, uint32x2_t, '5', 'const { int32x2_t::from_array([N; 2]) }'] + - [int64x1_t, uint64x1_t, '6', 'const { int64x1_t::from_array([N as i64]) }'] + - [int8x16_t, uint8x16_t, '3', 'const { int8x16_t::from_array([N as i8; 16]) }'] + - [int16x8_t, uint16x8_t, '4', 'const { int16x8_t::from_array([N as i16; 8]) }'] + - [int32x4_t, uint32x4_t, '5', 'const { int32x4_t::from_array([N; 4]) }'] + - [int64x2_t, uint64x2_t, '6', 'const { int64x2_t::from_array([N as i64; 2]) }'] compose: - FnCall: [static_assert_uimm_bits!, [N, "{type[2]}"]] - LLVMLink: @@ -11445,14 +11445,14 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int8x8_t, uint8x8_t, '3', 'const { int8x8_t([N as i8; 8]) }'] - - [int16x4_t, uint16x4_t, '4', 'const { int16x4_t([N as i16; 4]) }'] - - [int32x2_t, uint32x2_t, '5', 'const { int32x2_t([N; 2]) }'] - - [int64x1_t, uint64x1_t, '6', 'const { int64x1_t([N as i64]) }'] - - [int8x16_t, uint8x16_t, '3', 'const { int8x16_t([N as i8; 16]) }'] - - [int16x8_t, uint16x8_t, '4', 'const { int16x8_t([N as i16; 8]) }'] - - [int32x4_t, uint32x4_t, '5', 'const { int32x4_t([N; 4]) }'] - - [int64x2_t, uint64x2_t, '6', 'const { int64x2_t([N as i64; 2]) }'] + - [int8x8_t, uint8x8_t, '3', 'const { int8x8_t::from_array([N as i8; 8]) }'] + - [int16x4_t, uint16x4_t, '4', 'const { int16x4_t::from_array([N as i16; 4]) }'] + - [int32x2_t, uint32x2_t, '5', 'const { int32x2_t::from_array([N; 2]) }'] + - [int64x1_t, uint64x1_t, '6', 'const { int64x1_t::from_array([N as i64]) 
}'] + - [int8x16_t, uint8x16_t, '3', 'const { int8x16_t::from_array([N as i8; 16]) }'] + - [int16x8_t, uint16x8_t, '4', 'const { int16x8_t::from_array([N as i16; 8]) }'] + - [int32x4_t, uint32x4_t, '5', 'const { int32x4_t::from_array([N; 4]) }'] + - [int64x2_t, uint64x2_t, '6', 'const { int64x2_t::from_array([N as i64; 2]) }'] compose: - FnCall: [static_assert_uimm_bits!, [N, "{type[2]}"]] - LLVMLink: @@ -14789,8 +14789,8 @@ intrinsics: - *neon-cfg-arm-unstable safety: safe types: - - ['vget_high_s64', 'int64x2_t', 'int64x1_t', 'vmov', 'ext', 'unsafe { int64x1_t([simd_extract!(a, 1)]) }'] - - ['vget_high_u64', 'uint64x2_t', 'uint64x1_t', 'vmov', 'ext', 'unsafe { uint64x1_t([simd_extract!(a, 1)]) }'] + - ['vget_high_s64', 'int64x2_t', 'int64x1_t', 'vmov', 'ext', 'unsafe { int64x1_t::from_array([simd_extract!(a, 1)]) }'] + - ['vget_high_u64', 'uint64x2_t', 'uint64x1_t', 'vmov', 'ext', 'unsafe { uint64x1_t::from_array([simd_extract!(a, 1)]) }'] compose: - Identifier: ['{type[5]}', Symbol] @@ -14805,8 +14805,8 @@ intrinsics: - *neon-cfg-arm-unstable safety: safe types: - - ['vget_low_s64', 'int64x2_t', 'int64x1_t', 'unsafe { int64x1_t([simd_extract!(a, 0)]) }'] - - ['vget_low_u64', 'uint64x2_t', 'uint64x1_t', 'unsafe { uint64x1_t([simd_extract!(a, 0)]) }'] + - ['vget_low_s64', 'int64x2_t', 'int64x1_t', 'unsafe { int64x1_t::from_array([simd_extract!(a, 0)]) }'] + - ['vget_low_u64', 'uint64x2_t', 'uint64x1_t', 'unsafe { uint64x1_t::from_array([simd_extract!(a, 0)]) }'] compose: - Identifier: ['{type[3]}', Symbol]