diff --git a/crates/core_arch/src/arm_shared/neon/generated.rs b/crates/core_arch/src/arm_shared/neon/generated.rs
index b55d238385..088377c13f 100644
--- a/crates/core_arch/src/arm_shared/neon/generated.rs
+++ b/crates/core_arch/src/arm_shared/neon/generated.rs
@@ -39546,17 +39546,7 @@ pub fn vqrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")]
         fn _vqrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t;
     }
-    unsafe {
-        _vqrshrn_n_s16(
-            a,
-            const {
-                int16x8_t([
-                    -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16,
-                    -N as i16,
-                ])
-            },
-        )
-    }
+    unsafe { _vqrshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) }
 }
 #[doc = "Signed saturating rounded shift right narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"]
@@ -39572,12 +39562,7 @@ pub fn vqrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")]
         fn _vqrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t;
     }
-    unsafe {
-        _vqrshrn_n_s32(
-            a,
-            const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) },
-        )
-    }
+    unsafe { _vqrshrn_n_s32(a, const { int32x4_t([-N; 4]) }) }
 }
 #[doc = "Signed saturating rounded shift right narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"]
@@ -39593,7 +39578,7 @@ pub fn vqrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")]
         fn _vqrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t;
     }
-    unsafe { _vqrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) }
+    unsafe { _vqrshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) }
 }
 #[doc = "Signed saturating rounded shift right narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"]
@@ -39786,17 +39771,7 @@ pub fn vqrshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")]
         fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> uint8x8_t;
     }
-    unsafe {
-        _vqrshrun_n_s16(
-            a,
-            const {
-                int16x8_t([
-                    -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16,
-                    -N as i16,
-                ])
-            },
-        )
-    }
+    unsafe { _vqrshrun_n_s16(a, const { int16x8_t([-N as i16; 8]) }) }
 }
 #[doc = "Signed saturating rounded shift right unsigned narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"]
@@ -39812,12 +39787,7 @@ pub fn vqrshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")]
         fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> uint16x4_t;
     }
-    unsafe {
-        _vqrshrun_n_s32(
-            a,
-            const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) },
-        )
-    }
+    unsafe { _vqrshrun_n_s32(a, const { int32x4_t([-N; 4]) }) }
 }
 #[doc = "Signed saturating rounded shift right unsigned narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"]
@@ -39833,7 +39803,7 @@ pub fn vqrshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")]
         fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> uint32x2_t;
     }
-    unsafe { _vqrshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) }
+    unsafe { _vqrshrun_n_s64(a, const { int64x2_t([-N as i64; 2]) }) }
 }
 #[doc = "Signed saturating rounded shift right unsigned narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"]
@@ -41018,17 +40988,7 @@ pub fn vqshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")]
         fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t;
     }
-    unsafe {
-        _vqshrn_n_s16(
-            a,
-            const {
-                int16x8_t([
-                    -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16,
-                    -N as i16,
-                ])
-            },
-        )
-    }
+    unsafe { _vqshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) }
 }
 #[doc = "Signed saturating shift right narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"]
@@ -41044,12 +41004,7 @@ pub fn vqshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")]
         fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t;
     }
-    unsafe {
-        _vqshrn_n_s32(
-            a,
-            const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) },
-        )
-    }
+    unsafe { _vqshrn_n_s32(a, const { int32x4_t([-N; 4]) }) }
 }
 #[doc = "Signed saturating shift right narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"]
@@ -41065,7 +41020,7 @@ pub fn vqshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")]
         fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t;
     }
-    unsafe { _vqshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) }
+    unsafe { _vqshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) }
 }
 #[doc = "Signed saturating shift right narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"]
@@ -41258,17 +41213,7 @@ pub fn vqshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")]
         fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> uint8x8_t;
     }
-    unsafe {
-        _vqshrun_n_s16(
-            a,
-            const {
-                int16x8_t([
-                    -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16,
-                    -N as i16,
-                ])
-            },
-        )
-    }
+    unsafe { _vqshrun_n_s16(a, const { int16x8_t([-N as i16; 8]) }) }
 }
 #[doc = "Signed saturating shift right unsigned narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"]
@@ -41284,12 +41229,7 @@ pub fn vqshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")]
         fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> uint16x4_t;
     }
-    unsafe {
-        _vqshrun_n_s32(
-            a,
-            const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) },
-        )
-    }
+    unsafe { _vqshrun_n_s32(a, const { int32x4_t([-N; 4]) }) }
 }
 #[doc = "Signed saturating shift right unsigned narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"]
@@ -41305,7 +41245,7 @@ pub fn vqshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")]
         fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> uint32x2_t;
     }
-    unsafe { _vqshrun_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) }
+    unsafe { _vqshrun_n_s64(a, const { int64x2_t([-N as i64; 2]) }) }
 }
 #[doc = "Signed saturating shift right unsigned narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"]
@@ -59463,17 +59403,7 @@ pub fn vrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")]
         fn _vrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t;
     }
-    unsafe {
-        _vrshrn_n_s16(
-            a,
-            const {
-                int16x8_t([
-                    -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16,
-                    -N as i16,
-                ])
-            },
-        )
-    }
+    unsafe { _vrshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) }
 }
 #[doc = "Rounding shift right narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"]
@@ -59489,12 +59419,7 @@ pub fn vrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")]
         fn _vrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t;
     }
-    unsafe {
-        _vrshrn_n_s32(
-            a,
-            const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) },
-        )
-    }
+    unsafe { _vrshrn_n_s32(a, const { int32x4_t([-N; 4]) }) }
 }
 #[doc = "Rounding shift right narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"]
@@ -59510,7 +59435,7 @@ pub fn vrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
         #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")]
         fn _vrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t;
     }
-    unsafe { _vrshrn_n_s64(a, const { int64x2_t([-N as i64, -N as i64]) }) }
+    unsafe { _vrshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) }
 }
 #[doc = "Rounding shift right narrow"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"]
@@ -63163,7 +63088,7 @@ pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
         transmute(vshiftins_v2i32(
             transmute(a),
             transmute(b),
-            int32x2_t::splat(N as i32),
+            int32x2_t::splat(N),
         ))
     }
 }
@@ -63181,7 +63106,7 @@ pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
         transmute(vshiftins_v4i32(
             transmute(a),
             transmute(b),
-            int32x4_t::splat(N as i32),
+            int32x4_t::splat(N),
         ))
     }
 }
@@ -63719,7 +63644,7 @@ pub fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
 #[rustc_legacy_const_generics(2)]
 pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     static_assert!(1 <= N && N <= 32);
-    vshiftins_v2i32(a, b, int32x2_t::splat(-N as i32))
+    vshiftins_v2i32(a, b, int32x2_t::splat(-N))
 }
 #[doc = "Shift Right and Insert (immediate)"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"]
@@ -63731,7 +63656,7 @@ pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
 #[rustc_legacy_const_generics(2)]
 pub fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     static_assert!(1 <= N && N <= 32);
-    vshiftins_v4i32(a, b, int32x4_t::splat(-N as i32))
+    vshiftins_v4i32(a, b, int32x4_t::splat(-N))
 }
 #[doc = "Shift Right and Insert (immediate)"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"]
diff --git a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml
index f6d9103f05..0264bb938d 100644
--- a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml
+++ b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml
@@ -7874,9 +7874,9 @@ intrinsics:
     static_defs: ['const N: i32']
     safety: safe
     types:
-      - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) }']
-      - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }']
-      - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64, -N as i64]) }']
+      - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16; 8]) }']
+      - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }']
+      - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }']
     compose:
       - FnCall: [static_assert!, ["{type[2]}"]]
      - LLVMLink:
@@ -7929,9 +7929,9 @@ intrinsics:
     static_defs: ['const N: i32']
     safety: safe
     types:
-      - [int16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) }']
-      - [int32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }']
-      - [int64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64, -N as i64]) }']
+      - [int16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16; 8]) }']
+      - [int32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }']
+      - [int64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }']
     compose:
       - FnCall: [static_assert!, ["{type[2]}"]]
      - LLVMLink:
@@ -8105,9 +8105,9 @@ intrinsics:
     static_defs: ['const N: i32']
     safety: safe
     types:
-      - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) }']
-      - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }']
-      - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64, -N as i64]) }']
+      - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16; 8]) }']
+      - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }']
+      - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }']
     compose:
       - FnCall: [static_assert!, ["{type[2]}"]]
      - LLVMLink:
@@ -8215,9 +8215,9 @@ intrinsics:
     static_defs: ['const N: i32']
     safety: safe
     types:
-      - [int16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) }']
-      - [int32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }']
-      - [int64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64, -N as i64]) }']
+      - [int16x8_t, uint8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16; 8]) }']
+      - [int32x4_t, uint16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }']
+      - [int64x2_t, uint32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }']
     compose:
       - FnCall: [static_assert!, ["{type[2]}"]]
      - LLVMLink:
@@ -8939,9 +8939,9 @@ intrinsics:
     static_defs: ['const N: i32']
     safety: safe
     types:
-      - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16]) }']
-      - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N as i32, -N as i32, -N as i32, -N as i32]) }']
-      - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64, -N as i64]) }']
+      - [int16x8_t, int8x8_t, 'N >= 1 && N <= 8', 'const { int16x8_t([-N as i16; 8]) }']
+      - [int32x4_t, int16x4_t, 'N >= 1 && N <= 16', 'const { int32x4_t([-N; 4]) }']
+      - [int64x2_t, int32x2_t, 'N >= 1 && N <= 32', 'const { int64x2_t([-N as i64; 2]) }']
     compose:
       - FnCall: [static_assert!, ["{type[2]}"]]
      - LLVMLink:
@@ -13862,8 +13862,8 @@ intrinsics:
       - [int8x16_t, '8', '1 <= N && N <= 8', 'v16i8', 'int8x16_t::splat', '-N as i8']
       - [int16x4_t, '16', '1 <= N && N <= 16', 'v4i16', 'int16x4_t::splat', '-N as i16']
       - [int16x8_t, '16', '1 <= N && N <= 16', 'v8i16', 'int16x8_t::splat', '-N as i16']
-      - [int32x2_t, '32', '1 <= N && N <= 32', 'v2i32', 'int32x2_t::splat', '-N as i32']
-      - [int32x4_t, '32', '1 <= N && N <= 32', 'v4i32', 'int32x4_t::splat', '-N as i32']
+      - [int32x2_t, '32', '1 <= N && N <= 32', 'v2i32', 'int32x2_t::splat', '-N']
+      - [int32x4_t, '32', '1 <= N && N <= 32', 'v4i32', 'int32x4_t::splat', '-N']
       - [int64x1_t, '64', '1 <= N && N <= 64', 'v1i64', 'int64x1_t::splat', '-N as i64']
       - [int64x2_t, '64', '1 <= N && N <= 64', 'v2i64', 'int64x2_t::splat', '-N as i64']
     compose:
@@ -13891,8 +13891,8 @@ intrinsics:
       - [uint8x16_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v16i8', 'int8x16_t::splat', 'N as i8']
       - [uint16x4_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v4i16', 'int16x4_t::splat', 'N as i16']
       - [uint16x8_t, "neon,v7", '16', 'static_assert_uimm_bits!', 'N, 4', 'v8i16', 'int16x8_t::splat', 'N as i16']
-      - [uint32x2_t, "neon,v7", '32', 'static_assert!', 'N >= 0 && N <= 31', 'v2i32', 'int32x2_t::splat', 'N as i32']
-      - [uint32x4_t, "neon,v7", '32', 'static_assert!', 'N >= 0 && N <= 31', 'v4i32', 'int32x4_t::splat', 'N as i32']
+      - [uint32x2_t, "neon,v7", '32', 'static_assert!', 'N >= 0 && N <= 31', 'v2i32', 'int32x2_t::splat', 'N']
+      - [uint32x4_t, "neon,v7", '32', 'static_assert!', 'N >= 0 && N <= 31', 'v4i32', 'int32x4_t::splat', 'N']
       - [uint64x1_t, "neon,v7", '64', 'static_assert!', 'N >= 0 && N <= 63', 'v1i64', 'int64x1_t::splat', 'N as i64']
       - [uint64x2_t, "neon,v7", '64', 'static_assert!', 'N >= 0 && N <= 63', 'v2i64', 'int64x2_t::splat', 'N as i64']
       - [poly8x8_t, "neon,v7", '8', 'static_assert_uimm_bits!', 'N, 3', 'v8i8', 'int8x8_t::splat', 'N as i8']
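
Note on the rewrite (a minimal sketch for illustration, not part of the patch): the shorter forms rely on Rust's array repeat-expression syntax `[expr; LEN]`, which is accepted inside `const` blocks and yields the same value as spelling each lane out by hand; the `as i32` casts are dropped only where `N` is already an `i32`. A self-contained check of that equivalence, using an arbitrary illustrative constant `N`:

    // Illustrative constant; in the intrinsics N is a const generic parameter.
    const N: i32 = 3;

    fn main() {
        // Splat written out lane by lane, as in the removed code.
        let spelled_out = [
            -N as i16, -N as i16, -N as i16, -N as i16,
            -N as i16, -N as i16, -N as i16, -N as i16,
        ];
        // Splat via an array repeat expression, as in the added code.
        let repeated = [-N as i16; 8];
        assert_eq!(spelled_out, repeated);
    }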