diff --git a/crates/core_arch/src/aarch64/neon/generated.rs b/crates/core_arch/src/aarch64/neon/generated.rs
index ce864cc7d9..b1d36343a0 100644
--- a/crates/core_arch/src/aarch64/neon/generated.rs
+++ b/crates/core_arch/src/aarch64/neon/generated.rs
@@ -613,7 +613,7 @@ pub fn vaddvq_f64(a: float64x2_t) -> f64 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
 pub fn vaddv_s32(a: int32x2_t) -> i32 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
@@ -622,7 +622,7 @@ pub fn vaddv_s32(a: int32x2_t) -> i32 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddv_s8(a: int8x8_t) -> i8 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
@@ -631,7 +631,7 @@ pub fn vaddv_s8(a: int8x8_t) -> i8 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddvq_s8(a: int8x16_t) -> i8 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
@@ -640,7 +640,7 @@ pub fn vaddvq_s8(a: int8x16_t) -> i8 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddv_s16(a: int16x4_t) -> i16 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
@@ -649,7 +649,7 @@ pub fn vaddv_s16(a: int16x4_t) -> i16 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddvq_s16(a: int16x8_t) -> i16 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
@@ -658,7 +658,7 @@ pub fn vaddvq_s16(a: int16x8_t) -> i16 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddvq_s32(a: int32x4_t) -> i32 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
@@ -667,7 +667,7 @@ pub fn vaddvq_s32(a: int32x4_t) -> i32 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
 pub fn vaddv_u32(a: uint32x2_t) -> u32 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
@@ -676,7 +676,7 @@ pub fn vaddv_u32(a: uint32x2_t) -> u32 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddv_u8(a: uint8x8_t) -> u8 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
@@ -685,7 +685,7 @@ pub fn vaddv_u8(a: uint8x8_t) -> u8 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
@@ -694,7 +694,7 @@ pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddv_u16(a: uint16x4_t) -> u16 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
@@ -703,7 +703,7 @@ pub fn vaddv_u16(a: uint16x4_t) -> u16 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
@@ -712,7 +712,7 @@ pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
@@ -721,7 +721,7 @@ pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
 pub fn vaddvq_s64(a: int64x2_t) -> i64 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
@@ -730,7 +730,7 @@ pub fn vaddvq_s64(a: int64x2_t) -> i64 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
 pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Multi-vector floating-point absolute maximum"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
@@ -15774,7 +15774,7 @@ pub fn vpadds_f32(a: float32x2_t) -> f32 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
 pub fn vpaddd_s64(a: int64x2_t) -> i64 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
@@ -15783,7 +15783,7 @@ pub fn vpaddd_s64(a: int64x2_t) -> i64 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
 pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Floating-point add pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
diff --git a/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml
index 48c12779a8..41ba11fb06 100644
--- a/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml
+++ b/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml
@@ -13261,7 +13261,7 @@ intrinsics:
     types:
       - [int64x2_t, i64]
     compose:
-      - FnCall: [simd_reduce_add_unordered, [a]]
+      - FnCall: [simd_reduce_add_ordered, [a, 0]]
 
   - name: "vpaddd_u64"
     doc: "Add pairwise"
@@ -13274,7 +13274,7 @@ intrinsics:
     types:
       - [uint64x2_t, u64]
     compose:
-      - FnCall: [simd_reduce_add_unordered, [a]]
+      - FnCall: [simd_reduce_add_ordered, [a, 0]]
 
   - name: "vaddv{neon_type[0].no}"
     doc: "Add across vector"
@@ -13291,7 +13291,7 @@ intrinsics:
       - [int16x8_t, i16]
       - [int32x4_t, i32]
     compose:
-      - FnCall: [simd_reduce_add_unordered, [a]]
+      - FnCall: [simd_reduce_add_ordered, [a, 0]]
 
   - name: "vaddv{neon_type[0].no}"
     doc: "Add across vector"
@@ -13304,7 +13304,7 @@ intrinsics:
     types:
       - [int32x2_t, i32]
     compose:
-      - FnCall: [simd_reduce_add_unordered, [a]]
+      - FnCall: [simd_reduce_add_ordered, [a, 0]]
 
   - name: "vaddv{neon_type[0].no}"
     doc: "Add across vector"
@@ -13317,7 +13317,7 @@ intrinsics:
     types:
       - [int64x2_t, i64]
     compose:
-      - FnCall: [simd_reduce_add_unordered, [a]]
+      - FnCall: [simd_reduce_add_ordered, [a, 0]]
 
   - name: "vaddv{neon_type[0].no}"
     doc: "Add across vector"
@@ -13334,7 +13334,7 @@ intrinsics:
       - [uint16x8_t, u16]
       - [uint32x4_t, u32]
     compose:
-      - FnCall: [simd_reduce_add_unordered, [a]]
+      - FnCall: [simd_reduce_add_ordered, [a, 0]]
 
   - name: "vaddv{neon_type[0].no}"
     doc: "Add across vector"
@@ -13347,7 +13347,7 @@ intrinsics:
     types:
       - [uint32x2_t, u32, i32]
     compose:
-      - FnCall: [simd_reduce_add_unordered, [a]]
+      - FnCall: [simd_reduce_add_ordered, [a, 0]]
 
   - name: "vaddv{neon_type[0].no}"
     doc: "Add across vector"
@@ -13360,7 +13360,7 @@ intrinsics:
    types:
       - [uint64x2_t, u64, i64]
     compose:
-      - FnCall: [simd_reduce_add_unordered, [a]]
+      - FnCall: [simd_reduce_add_ordered, [a, 0]]
 
   - name: "vaddlv{neon_type[0].no}"
     doc: "Signed Add Long across Vector"
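Note (not part of the patch): simd_reduce_add_ordered takes an explicit initial accumulator, so the integer reductions above are seeded with the additive identity 0. Because wrapping integer addition is associative and commutative, an ordered (left-to-right) reduction seeded with 0 produces exactly the same value as the old unordered (tree-shaped) reduction, so the change is behavior-preserving for every intrinsic in this diff; the ordering distinction only matters for floating-point reductions, which are untouched here. A minimal standalone sketch of that equivalence, using hypothetical demo helpers rather than the actual rustc intrinsics:

    // Left-to-right fold seeded with an accumulator -- the shape of
    // simd_reduce_add_ordered(a, acc). Hypothetical demo helper.
    fn reduce_add_ordered(a: [i16; 4], acc: i16) -> i16 {
        a.iter().fold(acc, |s, &x| s.wrapping_add(x))
    }

    // One pairwise tree shape an unordered reduction is free to pick.
    // Hypothetical demo helper.
    fn reduce_add_unordered(a: [i16; 4]) -> i16 {
        (a[0].wrapping_add(a[1])).wrapping_add(a[2].wrapping_add(a[3]))
    }

    fn main() {
        // Include an overflowing element: wrapping addition still
        // commutes and reassociates, so both shapes agree.
        let v = [i16::MAX, 7, -3, 42];
        assert_eq!(reduce_add_ordered(v, 0), reduce_add_unordered(v));
        println!("sum = {}", reduce_add_ordered(v, 0));
    }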