1#![allow(non_camel_case_types)]
7#![allow(unused_imports)]
8
9use crate::{core_arch::simd, intrinsics::simd::*, marker::Sized, mem, ptr};
10
11#[cfg(test)]
12use stdarch_test::assert_instr;
13
// The one-and-only WebAssembly SIMD vector type. It is declared via the
// `types!` macro so it picks up the usual stdarch vector-type plumbing.
types! {
    #![stable(feature = "wasm_simd", since = "1.54.0")]

    // 128 bits wide, internally carried as 4 x i32 lanes; every lane
    // interpretation (i8x16 .. f64x2) is expressed as a view of this one type.
    pub struct v128(4 x i32);
}
40
// Generates zero-cost reinterpretation helpers between `v128` and the
// lane-typed `simd::*` vectors: `v128::as_*` in one direction and
// `<ty>::v128()` in the other. Both directions are transmutes between
// same-size (16-byte) plain-old-data vector types, i.e. bitcasts.
macro_rules! conversions {
    ($(($name:ident = $ty:ty))*) => {
        impl v128 {
            $(
                #[inline(always)]
                pub(crate) fn $name(self) -> $ty {
                    // SAFETY: $ty is a 16-byte POD vector, same size as v128.
                    unsafe { mem::transmute(self) }
                }
            )*
        }
        $(
            impl $ty {
                #[inline(always)]
                pub(crate) const fn v128(self) -> v128 {
                    // SAFETY: same-size POD transmute in the other direction.
                    unsafe { mem::transmute(self) }
                }
            }
        )*
    }
}
61
// Instantiate the reinterpretation helpers for every lane configuration a
// `v128` can be viewed as.
conversions! {
    (as_u8x16 = simd::u8x16)
    (as_u16x8 = simd::u16x8)
    (as_u32x4 = simd::u32x4)
    (as_u64x2 = simd::u64x2)
    (as_i8x16 = simd::i8x16)
    (as_i16x8 = simd::i16x8)
    (as_i32x4 = simd::i32x4)
    (as_i64x2 = simd::i64x2)
    (as_f32x4 = simd::f32x4)
    (as_f64x2 = simd::f64x2)
}
74
75#[allow(improper_ctypes)]
76unsafe extern "unadjusted" {
77    #[link_name = "llvm.wasm.swizzle"]
78    fn llvm_swizzle(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
79
80    #[link_name = "llvm.wasm.bitselect.v16i8"]
81    fn llvm_bitselect(a: simd::i8x16, b: simd::i8x16, c: simd::i8x16) -> simd::i8x16;
82    #[link_name = "llvm.wasm.anytrue.v16i8"]
83    fn llvm_any_true_i8x16(x: simd::i8x16) -> i32;
84
85    #[link_name = "llvm.wasm.alltrue.v16i8"]
86    fn llvm_i8x16_all_true(x: simd::i8x16) -> i32;
87    #[link_name = "llvm.wasm.bitmask.v16i8"]
88    fn llvm_bitmask_i8x16(a: simd::i8x16) -> i32;
89    #[link_name = "llvm.wasm.narrow.signed.v16i8.v8i16"]
90    fn llvm_narrow_i8x16_s(a: simd::i16x8, b: simd::i16x8) -> simd::i8x16;
91    #[link_name = "llvm.wasm.narrow.unsigned.v16i8.v8i16"]
92    fn llvm_narrow_i8x16_u(a: simd::i16x8, b: simd::i16x8) -> simd::i8x16;
93    #[link_name = "llvm.wasm.avgr.unsigned.v16i8"]
94    fn llvm_avgr_u_i8x16(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
95
96    #[link_name = "llvm.wasm.extadd.pairwise.signed.v8i16"]
97    fn llvm_i16x8_extadd_pairwise_i8x16_s(x: simd::i8x16) -> simd::i16x8;
98    #[link_name = "llvm.wasm.extadd.pairwise.unsigned.v8i16"]
99    fn llvm_i16x8_extadd_pairwise_i8x16_u(x: simd::i8x16) -> simd::i16x8;
100    #[link_name = "llvm.wasm.q15mulr.sat.signed"]
101    fn llvm_q15mulr(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
102    #[link_name = "llvm.wasm.alltrue.v8i16"]
103    fn llvm_i16x8_all_true(x: simd::i16x8) -> i32;
104    #[link_name = "llvm.wasm.bitmask.v8i16"]
105    fn llvm_bitmask_i16x8(a: simd::i16x8) -> i32;
106    #[link_name = "llvm.wasm.narrow.signed.v8i16.v4i32"]
107    fn llvm_narrow_i16x8_s(a: simd::i32x4, b: simd::i32x4) -> simd::i16x8;
108    #[link_name = "llvm.wasm.narrow.unsigned.v8i16.v4i32"]
109    fn llvm_narrow_i16x8_u(a: simd::i32x4, b: simd::i32x4) -> simd::i16x8;
110    #[link_name = "llvm.wasm.avgr.unsigned.v8i16"]
111    fn llvm_avgr_u_i16x8(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
112
113    #[link_name = "llvm.wasm.extadd.pairwise.signed.v16i8"]
114    fn llvm_i32x4_extadd_pairwise_i16x8_s(x: simd::i16x8) -> simd::i32x4;
115    #[link_name = "llvm.wasm.extadd.pairwise.unsigned.v16i8"]
116    fn llvm_i32x4_extadd_pairwise_i16x8_u(x: simd::i16x8) -> simd::i32x4;
117    #[link_name = "llvm.wasm.alltrue.v4i32"]
118    fn llvm_i32x4_all_true(x: simd::i32x4) -> i32;
119    #[link_name = "llvm.wasm.bitmask.v4i32"]
120    fn llvm_bitmask_i32x4(a: simd::i32x4) -> i32;
121    #[link_name = "llvm.wasm.dot"]
122    fn llvm_i32x4_dot_i16x8_s(a: simd::i16x8, b: simd::i16x8) -> simd::i32x4;
123
124    #[link_name = "llvm.wasm.alltrue.v2i64"]
125    fn llvm_i64x2_all_true(x: simd::i64x2) -> i32;
126    #[link_name = "llvm.wasm.bitmask.v2i64"]
127    fn llvm_bitmask_i64x2(a: simd::i64x2) -> i32;
128
129    #[link_name = "llvm.nearbyint.v4f32"]
130    fn llvm_f32x4_nearest(x: simd::f32x4) -> simd::f32x4;
131    #[link_name = "llvm.minimum.v4f32"]
132    fn llvm_f32x4_min(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4;
133    #[link_name = "llvm.maximum.v4f32"]
134    fn llvm_f32x4_max(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4;
135
136    #[link_name = "llvm.nearbyint.v2f64"]
137    fn llvm_f64x2_nearest(x: simd::f64x2) -> simd::f64x2;
138    #[link_name = "llvm.minimum.v2f64"]
139    fn llvm_f64x2_min(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2;
140    #[link_name = "llvm.maximum.v2f64"]
141    fn llvm_f64x2_max(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2;
142}
143
/// An align-1 (`#[repr(packed)]`) wrapper used to express unaligned loads and
/// stores of `T` through raw pointers.
#[repr(packed)]
#[derive(Copy)]
struct Unaligned<T>(T);
147
// Manual `Clone`: a derived impl would call `self.0.clone()` through a
// reference to a packed (potentially misaligned) field, which Rust forbids;
// copying the whole wrapper by value is fine for `T: Copy`.
impl<T: Copy> Clone for Unaligned<T> {
    fn clone(&self) -> Unaligned<T> {
        *self
    }
}
153
/// Loads a `v128` vector from the given heap address.
///
/// # Safety
/// `m` must be valid to read 16 bytes from; no alignment is required (the
/// `Unaligned` wrapper turns this into a 1-byte-aligned read).
#[inline]
#[cfg_attr(test, assert_instr(v128.load))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load(m: *const v128) -> v128 {
    (*(m as *const Unaligned<v128>)).0
}
184
/// Loads eight 8-bit signed integers from `m` and sign-extends each one to a
/// 16-bit lane of the returned vector.
///
/// # Safety
/// `m` must be valid to read 8 bytes from; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 {
    let m = *(m as *const Unaligned<simd::i8x8>);
    simd_cast::<_, simd::i16x8>(m.0).v128()
}
202
/// Loads eight 8-bit unsigned integers from `m` and zero-extends each one to
/// a 16-bit lane of the returned vector.
///
/// # Safety
/// `m` must be valid to read 8 bytes from; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i16x8_load_extend_u8x8(m: *const u8) -> v128 {
    let m = *(m as *const Unaligned<simd::u8x8>);
    simd_cast::<_, simd::u16x8>(m.0).v128()
}
220
221#[stable(feature = "wasm_simd", since = "1.54.0")]
222pub use i16x8_load_extend_u8x8 as u16x8_load_extend_u8x8;
223
/// Loads four 16-bit signed integers from `m` and sign-extends each one to a
/// 32-bit lane of the returned vector.
///
/// # Safety
/// `m` must be valid to read 8 bytes from; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 {
    let m = *(m as *const Unaligned<simd::i16x4>);
    simd_cast::<_, simd::i32x4>(m.0).v128()
}
241
/// Loads four 16-bit unsigned integers from `m` and zero-extends each one to
/// a 32-bit lane of the returned vector.
///
/// # Safety
/// `m` must be valid to read 8 bytes from; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i32x4_load_extend_u16x4(m: *const u16) -> v128 {
    let m = *(m as *const Unaligned<simd::u16x4>);
    simd_cast::<_, simd::u32x4>(m.0).v128()
}
259
260#[stable(feature = "wasm_simd", since = "1.54.0")]
261pub use i32x4_load_extend_u16x4 as u32x4_load_extend_u16x4;
262
/// Loads two 32-bit signed integers from `m` and sign-extends each one to a
/// 64-bit lane of the returned vector.
///
/// # Safety
/// `m` must be valid to read 8 bytes from; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32x2_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32x2_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 {
    let m = *(m as *const Unaligned<simd::i32x2>);
    simd_cast::<_, simd::i64x2>(m.0).v128()
}
280
/// Loads two 32-bit unsigned integers from `m` and zero-extends each one to a
/// 64-bit lane of the returned vector.
///
/// # Safety
/// `m` must be valid to read 8 bytes from; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32x2_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32x2_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i64x2_load_extend_u32x2(m: *const u32) -> v128 {
    let m = *(m as *const Unaligned<simd::u32x2>);
    simd_cast::<_, simd::u64x2>(m.0).v128()
}
298
299#[stable(feature = "wasm_simd", since = "1.54.0")]
300pub use i64x2_load_extend_u32x2 as u64x2_load_extend_u32x2;
301
302#[inline]
315#[cfg_attr(test, assert_instr(v128.load8_splat))]
316#[target_feature(enable = "simd128")]
317#[doc(alias("v128.load8_splat"))]
318#[stable(feature = "wasm_simd", since = "1.54.0")]
319pub unsafe fn v128_load8_splat(m: *const u8) -> v128 {
320    u8x16_splat(*m)
321}
322
/// Loads a single 16-bit value from `m` (unaligned read) and splats it into
/// all eight lanes of the returned vector.
///
/// # Safety
/// `m` must be valid to read 2 bytes from; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load16_splat(m: *const u16) -> v128 {
    u16x8_splat(ptr::read_unaligned(m))
}
343
/// Loads a single 32-bit value from `m` (unaligned read) and splats it into
/// all four lanes of the returned vector.
///
/// # Safety
/// `m` must be valid to read 4 bytes from; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_splat(m: *const u32) -> v128 {
    u32x4_splat(ptr::read_unaligned(m))
}
364
/// Loads a single 64-bit value from `m` (unaligned read) and splats it into
/// both lanes of the returned vector.
///
/// # Safety
/// `m` must be valid to read 8 bytes from; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_splat(m: *const u64) -> v128 {
    u64x2_splat(ptr::read_unaligned(m))
}
385
/// Loads a 32-bit value from `m` (unaligned read) into the lowest lane of the
/// returned vector, with the other three lanes set to zero.
///
/// # Safety
/// `m` must be valid to read 4 bytes from; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_zero(m: *const u32) -> v128 {
    u32x4(ptr::read_unaligned(m), 0, 0, 0)
}
406
/// Loads a 64-bit value from `m` (unaligned read) into lane 0 of the returned
/// vector, with lane 1 set to zero.
///
/// # Safety
/// `m` must be valid to read 8 bytes from; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_zero(m: *const u64) -> v128 {
    // NOTE(review): written as replace-lane-into-zero rather than `u64x2(v, 0)`
    // — presumably for instruction selection; confirm before changing.
    u64x2_replace_lane::<0>(u64x2(0, 0), ptr::read_unaligned(m))
}
427
/// Stores a `v128` vector to the given heap address.
///
/// # Safety
/// `m` must be valid to write 16 bytes to; no alignment is required (the
/// `Unaligned` wrapper turns this into a 1-byte-aligned write).
#[inline]
#[cfg_attr(test, assert_instr(v128.store))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store(m: *mut v128, a: v128) {
    *(m as *mut Unaligned<v128>) = Unaligned(a);
}
458
459#[inline]
471#[cfg_attr(test, assert_instr(v128.load8_lane, L = 0))]
472#[target_feature(enable = "simd128")]
473#[doc(alias("v128.load8_lane"))]
474#[stable(feature = "wasm_simd", since = "1.54.0")]
475pub unsafe fn v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128 {
476    u8x16_replace_lane::<L>(v, *m)
477}
478
/// Loads a 16-bit value from `m` (unaligned read) into lane `L` of `v`,
/// returning the updated vector; all other lanes are passed through.
///
/// # Safety
/// `m` must be valid to read 2 bytes from; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128 {
    u16x8_replace_lane::<L>(v, ptr::read_unaligned(m))
}
498
/// Loads a 32-bit value from `m` (unaligned read) into lane `L` of `v`,
/// returning the updated vector; all other lanes are passed through.
///
/// # Safety
/// `m` must be valid to read 4 bytes from; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128 {
    u32x4_replace_lane::<L>(v, ptr::read_unaligned(m))
}
518
/// Loads a 64-bit value from `m` (unaligned read) into lane `L` of `v`,
/// returning the updated vector; the other lane is passed through.
///
/// # Safety
/// `m` must be valid to read 8 bytes from; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_lane<const L: usize>(v: v128, m: *const u64) -> v128 {
    u64x2_replace_lane::<L>(v, ptr::read_unaligned(m))
}
538
539#[inline]
551#[cfg_attr(test, assert_instr(v128.store8_lane, L = 0))]
552#[target_feature(enable = "simd128")]
553#[doc(alias("v128.store8_lane"))]
554#[stable(feature = "wasm_simd", since = "1.54.0")]
555pub unsafe fn v128_store8_lane<const L: usize>(v: v128, m: *mut u8) {
556    *m = u8x16_extract_lane::<L>(v);
557}
558
/// Stores the 16-bit value of lane `L` of `v` to `m` (unaligned write).
///
/// # Safety
/// `m` must be valid to write 2 bytes to; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.store16_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store16_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store16_lane<const L: usize>(v: v128, m: *mut u16) {
    ptr::write_unaligned(m, u16x8_extract_lane::<L>(v))
}
578
/// Stores the 32-bit value of lane `L` of `v` to `m` (unaligned write).
///
/// # Safety
/// `m` must be valid to write 4 bytes to; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.store32_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store32_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store32_lane<const L: usize>(v: v128, m: *mut u32) {
    ptr::write_unaligned(m, u32x4_extract_lane::<L>(v))
}
598
/// Stores the 64-bit value of lane `L` of `v` to `m` (unaligned write).
///
/// # Safety
/// `m` must be valid to write 8 bytes to; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.store64_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store64_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store64_lane<const L: usize>(v: v128, m: *mut u64) {
    ptr::write_unaligned(m, u64x2_extract_lane::<L>(v))
}
618
/// Materializes a SIMD value from its sixteen 8-bit lane values.
///
/// Usable in `const` contexts; with constant arguments this corresponds to
/// the `v128.const` instruction (per the `assert_instr` check below).
#[inline]
#[cfg_attr(
    test,
    assert_instr(
        v128.const,
        a0 = 0,
        a1 = 1,
        a2 = 2,
        a3 = 3,
        a4 = 4,
        a5 = 5,
        a6 = 6,
        a7 = 7,
        a8 = 8,
        a9 = 9,
        a10 = 10,
        a11 = 11,
        a12 = 12,
        a13 = 13,
        a14 = 14,
        a15 = 15,
    )
)]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i8x16(
    a0: i8,
    a1: i8,
    a2: i8,
    a3: i8,
    a4: i8,
    a5: i8,
    a6: i8,
    a7: i8,
    a8: i8,
    a9: i8,
    a10: i8,
    a11: i8,
    a12: i8,
    a13: i8,
    a14: i8,
    a15: i8,
) -> v128 {
    simd::i8x16::new(
        a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
    )
    .v128()
}
673
/// Materializes a SIMD value from its sixteen 8-bit (unsigned) lane values.
///
/// Usable in `const` contexts; bit-identical to the `i8x16` constructor.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u8x16(
    a0: u8,
    a1: u8,
    a2: u8,
    a3: u8,
    a4: u8,
    a5: u8,
    a6: u8,
    a7: u8,
    a8: u8,
    a9: u8,
    a10: u8,
    a11: u8,
    a12: u8,
    a13: u8,
    a14: u8,
    a15: u8,
) -> v128 {
    simd::u8x16::new(
        a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
    )
    .v128()
}
706
/// Materializes a SIMD value from its eight 16-bit lane values.
///
/// Usable in `const` contexts; with constant arguments this corresponds to
/// the `v128.const` instruction.
#[inline]
#[cfg_attr(
    test,
    assert_instr(
        v128.const,
        a0 = 0,
        a1 = 1,
        a2 = 2,
        a3 = 3,
        a4 = 4,
        a5 = 5,
        a6 = 6,
        a7 = 7,
    )
)]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i16x8(a0: i16, a1: i16, a2: i16, a3: i16, a4: i16, a5: i16, a6: i16, a7: i16) -> v128 {
    simd::i16x8::new(a0, a1, a2, a3, a4, a5, a6, a7).v128()
}
733
/// Materializes a SIMD value from its eight 16-bit (unsigned) lane values.
///
/// Usable in `const` contexts; bit-identical to the `i16x8` constructor.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u16x8(a0: u16, a1: u16, a2: u16, a3: u16, a4: u16, a5: u16, a6: u16, a7: u16) -> v128 {
    simd::u16x8::new(a0, a1, a2, a3, a4, a5, a6, a7).v128()
}
746
/// Materializes a SIMD value from its four 32-bit lane values.
///
/// Usable in `const` contexts; with constant arguments this corresponds to
/// the `v128.const` instruction.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0, a1 = 1, a2 = 2, a3 = 3))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128 {
    simd::i32x4::new(a0, a1, a2, a3).v128()
}
760
/// Materializes a SIMD value from its four 32-bit (unsigned) lane values.
///
/// Usable in `const` contexts; bit-identical to the `i32x4` constructor.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128 {
    simd::u32x4::new(a0, a1, a2, a3).v128()
}
773
/// Materializes a SIMD value from its two 64-bit lane values.
///
/// Usable in `const` contexts; with constant arguments this corresponds to
/// the `v128.const` instruction.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 1, a1 = 2))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i64x2(a0: i64, a1: i64) -> v128 {
    simd::i64x2::new(a0, a1).v128()
}
787
/// Materializes a SIMD value from its two 64-bit (unsigned) lane values.
///
/// Usable in `const` contexts; bit-identical to the `i64x2` constructor.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u64x2(a0: u64, a1: u64) -> v128 {
    simd::u64x2::new(a0, a1).v128()
}
800
/// Materializes a SIMD value from its four 32-bit float lane values.
///
/// Const-stable since 1.56.0 (floats gained const support later than the
/// integer constructors, hence the separate feature name).
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0, a2 = 2.0, a3 = 3.0))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd_const", since = "1.56.0")]
#[target_feature(enable = "simd128")]
pub const fn f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128 {
    simd::f32x4::new(a0, a1, a2, a3).v128()
}
814
/// Materializes a SIMD value from its two 64-bit float lane values.
///
/// Const-stable since 1.56.0, like `f32x4`.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd_const", since = "1.56.0")]
#[target_feature(enable = "simd128")]
pub const fn f64x2(a0: f64, a1: f64) -> v128 {
    simd::f64x2::new(a0, a1).v128()
}
828
/// Returns a new vector whose 16 byte lanes are selected, by the compile-time
/// constants `I0..=I15`, from the 32 byte lanes of `a` followed by `b`:
/// indices 0..16 pick from `a`, indices 16..32 pick from `b`.
///
/// Each index is statically checked to be below 32; out-of-range constants
/// fail to compile.
#[inline]
#[cfg_attr(test,
    assert_instr(
        i8x16.shuffle,
        I0 = 0,
        I1 = 2,
        I2 = 4,
        I3 = 6,
        I4 = 8,
        I5 = 10,
        I6 = 12,
        I7 = 14,
        I8 = 16,
        I9 = 18,
        I10 = 20,
        I11 = 22,
        I12 = 24,
        I13 = 26,
        I14 = 28,
        I15 = 30,
    )
)]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_shuffle<
    const I0: usize,
    const I1: usize,
    const I2: usize,
    const I3: usize,
    const I4: usize,
    const I5: usize,
    const I6: usize,
    const I7: usize,
    const I8: usize,
    const I9: usize,
    const I10: usize,
    const I11: usize,
    const I12: usize,
    const I13: usize,
    const I14: usize,
    const I15: usize,
>(
    a: v128,
    b: v128,
) -> v128 {
    static_assert!(I0 < 32);
    static_assert!(I1 < 32);
    static_assert!(I2 < 32);
    static_assert!(I3 < 32);
    static_assert!(I4 < 32);
    static_assert!(I5 < 32);
    static_assert!(I6 < 32);
    static_assert!(I7 < 32);
    static_assert!(I8 < 32);
    static_assert!(I9 < 32);
    static_assert!(I10 < 32);
    static_assert!(I11 < 32);
    static_assert!(I12 < 32);
    static_assert!(I13 < 32);
    static_assert!(I14 < 32);
    static_assert!(I15 < 32);
    // SAFETY: all indices are statically verified in-range for the
    // 32-element concatenation of the two inputs.
    let shuf: simd::u8x16 = unsafe {
        simd_shuffle!(
            a.as_u8x16(),
            b.as_u8x16(),
            [
                I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32,
                I7 as u32, I8 as u32, I9 as u32, I10 as u32, I11 as u32, I12 as u32, I13 as u32,
                I14 as u32, I15 as u32,
            ],
        )
    };
    shuf.v128()
}
918
919#[stable(feature = "wasm_simd", since = "1.54.0")]
920pub use i8x16_shuffle as u8x16_shuffle;
921
/// Same as `i8x16_shuffle` but at 16-bit granularity: the 8 output lanes are
/// selected from the 16 lanes of `a` (indices 0..8) followed by `b`
/// (indices 8..16).
///
/// Each index is statically checked to be below 16. Lowers to the same
/// `i8x16.shuffle` instruction with expanded byte indices.
#[inline]
#[cfg_attr(test,
    assert_instr(
        i8x16.shuffle,
        I0 = 0,
        I1 = 2,
        I2 = 4,
        I3 = 6,
        I4 = 8,
        I5 = 10,
        I6 = 12,
        I7 = 14,
    )
)]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_shuffle<
    const I0: usize,
    const I1: usize,
    const I2: usize,
    const I3: usize,
    const I4: usize,
    const I5: usize,
    const I6: usize,
    const I7: usize,
>(
    a: v128,
    b: v128,
) -> v128 {
    static_assert!(I0 < 16);
    static_assert!(I1 < 16);
    static_assert!(I2 < 16);
    static_assert!(I3 < 16);
    static_assert!(I4 < 16);
    static_assert!(I5 < 16);
    static_assert!(I6 < 16);
    static_assert!(I7 < 16);
    // SAFETY: all indices statically verified in-range.
    let shuf: simd::u16x8 = unsafe {
        simd_shuffle!(
            a.as_u16x8(),
            b.as_u16x8(),
            [
                I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32,
                I7 as u32,
            ],
        )
    };
    shuf.v128()
}
979
980#[stable(feature = "wasm_simd", since = "1.54.0")]
981pub use i16x8_shuffle as u16x8_shuffle;
982
/// Same as `i8x16_shuffle` but at 32-bit granularity: the 4 output lanes are
/// selected from the 8 lanes of `a` (indices 0..4) followed by `b`
/// (indices 4..8). Each index is statically checked to be below 8.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2, I2 = 4, I3 = 6))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3: usize>(
    a: v128,
    b: v128,
) -> v128 {
    static_assert!(I0 < 8);
    static_assert!(I1 < 8);
    static_assert!(I2 < 8);
    static_assert!(I3 < 8);
    // SAFETY: all indices statically verified in-range.
    let shuf: simd::u32x4 = unsafe {
        simd_shuffle!(
            a.as_u32x4(),
            b.as_u32x4(),
            [I0 as u32, I1 as u32, I2 as u32, I3 as u32],
        )
    };
    shuf.v128()
}
1012
1013#[stable(feature = "wasm_simd", since = "1.54.0")]
1014pub use i32x4_shuffle as u32x4_shuffle;
1015
/// Same as `i8x16_shuffle` but at 64-bit granularity: the 2 output lanes are
/// selected from the 4 lanes of `a` (indices 0..2) followed by `b`
/// (indices 2..4). Each index is statically checked to be below 4.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_shuffle<const I0: usize, const I1: usize>(a: v128, b: v128) -> v128 {
    static_assert!(I0 < 4);
    static_assert!(I1 < 4);
    // SAFETY: both indices statically verified in-range.
    let shuf: simd::u64x2 =
        unsafe { simd_shuffle!(a.as_u64x2(), b.as_u64x2(), [I0 as u32, I1 as u32]) };
    shuf.v128()
}
1035
1036#[stable(feature = "wasm_simd", since = "1.54.0")]
1037pub use i64x2_shuffle as u64x2_shuffle;
1038
/// Extracts the scalar value of lane `N` from `a` interpreted as sixteen
/// packed `i8` lanes. `N` is statically checked to be below 16.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.extract_lane_s, N = 3))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.extract_lane_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 {
    static_assert!(N < 16);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_extract!(a.as_i8x16(), N as u32) }
}
1052
/// Extracts the scalar value of lane `N` from `a` interpreted as sixteen
/// packed `u8` lanes. `N` is statically checked to be below 16.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.extract_lane_u, N = 3))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.extract_lane_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 {
    static_assert!(N < 16);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_extract!(a.as_u8x16(), N as u32) }
}
1066
/// Returns a new vector equal to `a` with 8-bit lane `N` replaced by `val`.
/// `N` is statically checked to be below 16.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 {
    static_assert!(N < 16);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_insert!(a.as_i8x16(), N as u32, val).v128() }
}
1080
/// Returns a new vector equal to `a` with unsigned 8-bit lane `N` replaced by
/// `val`. `N` is statically checked to be below 16.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 {
    static_assert!(N < 16);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_insert!(a.as_u8x16(), N as u32, val).v128() }
}
1094
/// Extracts the scalar value of lane `N` from `a` interpreted as eight packed
/// `i16` lanes. `N` is statically checked to be below 8.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extract_lane_s, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extract_lane_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 {
    static_assert!(N < 8);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_extract!(a.as_i16x8(), N as u32) }
}
1108
/// Extracts the scalar value of lane `N` from `a` interpreted as eight packed
/// `u16` lanes. `N` is statically checked to be below 8.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extract_lane_u, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extract_lane_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 {
    static_assert!(N < 8);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_extract!(a.as_u16x8(), N as u32) }
}
1122
/// Returns a new vector equal to `a` with 16-bit lane `N` replaced by `val`.
/// `N` is statically checked to be below 8.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 {
    static_assert!(N < 8);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_insert!(a.as_i16x8(), N as u32, val).v128() }
}
1136
/// Returns a new vector equal to `a` with unsigned 16-bit lane `N` replaced
/// by `val`. `N` is statically checked to be below 8.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 {
    static_assert!(N < 8);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_insert!(a.as_u16x8(), N as u32, val).v128() }
}
1150
/// Extracts the scalar value of lane `N` from `a` interpreted as four packed
/// `i32` lanes. `N` is statically checked to be below 4.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extract_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 {
    static_assert!(N < 4);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_extract!(a.as_i32x4(), N as u32) }
}
1164
/// Extracts the scalar value of lane `N` from `a` interpreted as four packed
/// `u32` lanes; delegates to the signed variant with a bit-preserving cast
/// (at 32 bits there is no separate unsigned extract instruction).
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_extract_lane<const N: usize>(a: v128) -> u32 {
    i32x4_extract_lane::<N>(a) as u32
}
1176
/// Returns a new vector equal to `a` with 32-bit lane `N` replaced by `val`.
/// `N` is statically checked to be below 4.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 {
    static_assert!(N < 4);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_insert!(a.as_i32x4(), N as u32, val).v128() }
}
1190
/// Returns a new vector equal to `a` with unsigned 32-bit lane `N` replaced
/// by `val`; delegates to the signed variant with a bit-preserving cast.
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_replace_lane<const N: usize>(a: v128, val: u32) -> v128 {
    i32x4_replace_lane::<N>(a, val as i32)
}
1202
/// Extracts the scalar value of lane `N` from `a` interpreted as two packed
/// `i64` lanes. `N` is statically checked to be below 2.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 {
    static_assert!(N < 2);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_extract!(a.as_i64x2(), N as u32) }
}
1216
/// Extracts the scalar value of lane `N` from `a` interpreted as two packed
/// `u64` lanes; delegates to the signed variant with a bit-preserving cast.
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u64x2_extract_lane<const N: usize>(a: v128) -> u64 {
    i64x2_extract_lane::<N>(a) as u64
}
1228
/// Returns a new vector equal to `a` with 64-bit lane `N` replaced by `val`.
/// `N` is statically checked to be below 2.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.replace_lane, N = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 {
    static_assert!(N < 2);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_insert!(a.as_i64x2(), N as u32, val).v128() }
}
1242
/// Returns a new vector equal to `a` with unsigned 64-bit lane `N` replaced
/// by `val`; delegates to the signed variant with a bit-preserving cast.
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u64x2_replace_lane<const N: usize>(a: v128, val: u64) -> v128 {
    i64x2_replace_lane::<N>(a, val as i64)
}
1254
/// Extracts the scalar value of lane `N` from `a` interpreted as four packed
/// `f32` lanes. `N` is statically checked to be below 4.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 {
    static_assert!(N < 4);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_extract!(a.as_f32x4(), N as u32) }
}
1268
/// Returns a new vector equal to `a` with 32-bit float lane `N` replaced by
/// `val`. `N` is statically checked to be below 4.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.replace_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 {
    static_assert!(N < 4);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_insert!(a.as_f32x4(), N as u32, val).v128() }
}
1282
/// Extracts the scalar value of lane `N` from `a` interpreted as two packed
/// `f64` lanes. `N` is statically checked to be below 2.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 {
    static_assert!(N < 2);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_extract!(a.as_f64x2(), N as u32) }
}
1296
/// Returns a new vector equal to `a` with 64-bit float lane `N` replaced by
/// `val`. `N` is statically checked to be below 2.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.replace_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 {
    static_assert!(N < 2);
    // SAFETY: N statically verified in-bounds.
    unsafe { simd_insert!(a.as_f64x2(), N as u32, val).v128() }
}
1310
/// Selects bytes of `a` using the byte indices held in `s`; per the
/// `i8x16.swizzle` instruction's semantics, out-of-range indices select 0.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.swizzle))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.swizzle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_swizzle(a: v128, s: v128) -> v128 {
    // SAFETY: plain call to the LLVM intrinsic; no memory access.
    unsafe { llvm_swizzle(a.as_i8x16(), s.as_i8x16()).v128() }
}
1324
1325#[stable(feature = "wasm_simd", since = "1.54.0")]
1326pub use i8x16_swizzle as u8x16_swizzle;
1327
/// Creates a vector with all sixteen 8-bit lanes set to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_splat(a: i8) -> v128 {
    simd::i8x16::splat(a).v128()
}
1339
/// Creates a vector with all sixteen unsigned 8-bit lanes set to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_splat(a: u8) -> v128 {
    simd::u8x16::splat(a).v128()
}
1351
/// Creates a vector with all eight 16-bit lanes set to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_splat(a: i16) -> v128 {
    simd::i16x8::splat(a).v128()
}
1363
/// Creates a vector with all eight unsigned 16-bit lanes set to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_splat(a: u16) -> v128 {
    simd::u16x8::splat(a).v128()
}
1375
/// Creates a vector with all four 32-bit lanes set to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_splat(a: i32) -> v128 {
    simd::i32x4::splat(a).v128()
}
1387
/// Creates a vector with all four unsigned 32-bit lanes set to `a`;
/// delegates to the signed splat with a bit-preserving cast.
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_splat(a: u32) -> v128 {
    i32x4_splat(a as i32)
}
1398
/// Creates a vector with both 64-bit lanes set to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_splat(a: i64) -> v128 {
    simd::i64x2::splat(a).v128()
}
1410
1411#[inline]
1415#[target_feature(enable = "simd128")]
1416#[doc(alias("u64x2.splat"))]
1417#[stable(feature = "wasm_simd", since = "1.54.0")]
1418pub fn u64x2_splat(a: u64) -> v128 {
1419    i64x2_splat(a as i64)
1420}
1421
/// Creates a vector with identical lanes.
///
/// Constructs a vector with `a` replicated to all 4 lanes.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_splat(a: f32) -> v128 {
    simd::f32x4::splat(a).v128()
}

/// Creates a vector with identical lanes.
///
/// Constructs a vector with `a` replicated to all 2 lanes.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_splat(a: f64) -> v128 {
    simd::f64x2::splat(a).v128()
}
1445
/// Compares two vectors of 16 eight-bit integers lane-wise for equality.
///
/// Each output lane is all ones if the input lanes were equal, all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Compares two vectors of 16 eight-bit integers lane-wise for inequality.
///
/// Each output lane is all ones if the input lanes differed, all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

// Equality is sign-agnostic, so the unsigned names are re-exports.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_eq as u8x16_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_ne as u8x16_ne;

/// Lane-wise signed less-than of 16 eight-bit integers; true lanes become all
/// ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise unsigned less-than of 16 eight-bit integers; true lanes become
/// all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Lane-wise signed greater-than of 16 eight-bit integers; true lanes become
/// all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise unsigned greater-than of 16 eight-bit integers; true lanes
/// become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.gt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.gt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Lane-wise signed less-than-or-equal of 16 eight-bit integers; true lanes
/// become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise unsigned less-than-or-equal of 16 eight-bit integers; true lanes
/// become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.le_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.le_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Lane-wise signed greater-than-or-equal of 16 eight-bit integers; true
/// lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise unsigned greater-than-or-equal of 16 eight-bit integers; true
/// lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.ge_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.ge_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}
1590
/// Compares two vectors of 8 sixteen-bit integers lane-wise for equality.
///
/// Each output lane is all ones if the input lanes were equal, all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Compares two vectors of 8 sixteen-bit integers lane-wise for inequality.
///
/// Each output lane is all ones if the input lanes differed, all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

// Equality is sign-agnostic, so the unsigned names are re-exports.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_eq as u16x8_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_ne as u16x8_ne;

/// Lane-wise signed less-than of 8 sixteen-bit integers; true lanes become
/// all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Lane-wise unsigned less-than of 8 sixteen-bit integers; true lanes become
/// all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Lane-wise signed greater-than of 8 sixteen-bit integers; true lanes become
/// all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Lane-wise unsigned greater-than of 8 sixteen-bit integers; true lanes
/// become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.gt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.gt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Lane-wise signed less-than-or-equal of 8 sixteen-bit integers; true lanes
/// become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Lane-wise unsigned less-than-or-equal of 8 sixteen-bit integers; true
/// lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.le_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.le_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Lane-wise signed greater-than-or-equal of 8 sixteen-bit integers; true
/// lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Lane-wise unsigned greater-than-or-equal of 8 sixteen-bit integers; true
/// lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ge_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ge_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}
1735
/// Compares two vectors of 4 thirty-two-bit integers lane-wise for equality.
///
/// Each output lane is all ones if the input lanes were equal, all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Compares two vectors of 4 thirty-two-bit integers lane-wise for
/// inequality.
///
/// Each output lane is all ones if the input lanes differed, all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

// Equality is sign-agnostic, so the unsigned names are re-exports.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_eq as u32x4_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_ne as u32x4_ne;

/// Lane-wise signed less-than of 4 thirty-two-bit integers; true lanes become
/// all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Lane-wise unsigned less-than of 4 thirty-two-bit integers; true lanes
/// become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Lane-wise signed greater-than of 4 thirty-two-bit integers; true lanes
/// become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Lane-wise unsigned greater-than of 4 thirty-two-bit integers; true lanes
/// become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.gt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.gt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Lane-wise signed less-than-or-equal of 4 thirty-two-bit integers; true
/// lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Lane-wise unsigned less-than-or-equal of 4 thirty-two-bit integers; true
/// lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.le_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.le_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Lane-wise signed greater-than-or-equal of 4 thirty-two-bit integers; true
/// lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Lane-wise unsigned greater-than-or-equal of 4 thirty-two-bit integers;
/// true lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ge_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ge_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}
1880
/// Compares two vectors of 2 sixty-four-bit integers lane-wise for equality.
///
/// Each output lane is all ones if the input lanes were equal, all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Compares two vectors of 2 sixty-four-bit integers lane-wise for
/// inequality.
///
/// Each output lane is all ones if the input lanes differed, all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

// Equality is sign-agnostic, so the unsigned names are re-exports.
// NOTE: wasm SIMD only provides signed 64-bit ordered comparisons, hence no
// `u64x2_lt`/`gt`/`le`/`ge` below.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_eq as u64x2_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_ne as u64x2_ne;

/// Lane-wise signed less-than of 2 sixty-four-bit integers; true lanes become
/// all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Lane-wise signed greater-than of 2 sixty-four-bit integers; true lanes
/// become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Lane-wise signed less-than-or-equal of 2 sixty-four-bit integers; true
/// lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Lane-wise signed greater-than-or-equal of 2 sixty-four-bit integers; true
/// lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}
1969
/// Lane-wise equality of two vectors of 4 thirty-two-bit floats; each output
/// lane is all ones if the lanes compared equal, all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise inequality of two vectors of 4 thirty-two-bit floats; each
/// output lane is all ones if the lanes compared unequal, all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise less-than of two vectors of 4 thirty-two-bit floats; true lanes
/// become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.lt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.lt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise greater-than of two vectors of 4 thirty-two-bit floats; true
/// lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.gt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.gt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise less-than-or-equal of two vectors of 4 thirty-two-bit floats;
/// true lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.le))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.le"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise greater-than-or-equal of two vectors of 4 thirty-two-bit floats;
/// true lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ge))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ge"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}
2053
/// Lane-wise equality of two vectors of 2 sixty-four-bit floats; each output
/// lane is all ones if the lanes compared equal, all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise inequality of two vectors of 2 sixty-four-bit floats; each
/// output lane is all ones if the lanes compared unequal, all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise less-than of two vectors of 2 sixty-four-bit floats; true lanes
/// become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.lt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.lt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise greater-than of two vectors of 2 sixty-four-bit floats; true
/// lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.gt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.gt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise less-than-or-equal of two vectors of 2 sixty-four-bit floats;
/// true lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.le))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.le"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise greater-than-or-equal of two vectors of 2 sixty-four-bit floats;
/// true lanes become all ones, false lanes all zeros.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ge))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ge"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}
2137
/// Flips every bit of the 128-bit input vector (XOR with all ones).
#[inline]
#[cfg_attr(test, assert_instr(v128.not))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.not"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_not(a: v128) -> v128 {
    unsafe { simd_xor(a.as_i64x2(), simd::i64x2::new(!0, !0)).v128() }
}

/// Bitwise AND of the two 128-bit input vectors.
#[inline]
#[cfg_attr(test, assert_instr(v128.and))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.and"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_and(a: v128, b: v128) -> v128 {
    unsafe { simd_and(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Bitwise AND of `a` with the bitwise NOT of `b` (`a & !b`), expressed as
/// and + xor-with-all-ones so it lowers to the single `v128.andnot`
/// instruction.
#[inline]
#[cfg_attr(test, assert_instr(v128.andnot))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.andnot"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_andnot(a: v128, b: v128) -> v128 {
    unsafe {
        simd_and(
            a.as_i64x2(),
            // `new(-1, -1)` is the all-ones mask (same value `v128_not`
            // spells as `!0`).
            simd_xor(b.as_i64x2(), simd::i64x2::new(-1, -1)),
        )
        .v128()
    }
}

/// Bitwise OR of the two 128-bit input vectors.
#[inline]
#[cfg_attr(test, assert_instr(v128.or))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.or"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_or(a: v128, b: v128) -> v128 {
    unsafe { simd_or(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Bitwise XOR of the two 128-bit input vectors.
#[inline]
#[cfg_attr(test, assert_instr(v128.xor))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.xor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_xor(a: v128, b: v128) -> v128 {
    unsafe { simd_xor(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Bitwise select: each result bit is taken from `v1` where the corresponding
/// bit of `c` is set, and from `v2` where it is clear.
#[inline]
#[cfg_attr(test, assert_instr(v128.bitselect))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.bitselect"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_bitselect(v1: v128, v2: v128, c: v128) -> v128 {
    unsafe { llvm_bitselect(v1.as_i8x16(), v2.as_i8x16(), c.as_i8x16()).v128() }
}

/// Returns `true` if the 128-bit vector is not all zeros (any lane nonzero).
#[inline]
#[cfg_attr(test, assert_instr(v128.any_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.any_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_any_true(a: v128) -> bool {
    unsafe { llvm_any_true_i8x16(a.as_i8x16()) != 0 }
}
2218
/// Lane-wise wrapping absolute value of 16 eight-bit signed integers:
/// negative lanes are subtracted from zero (`i8::MIN` therefore wraps to
/// itself).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_abs(a: v128) -> v128 {
    unsafe {
        let a = a.as_i8x16();
        let zero = simd::i8x16::ZERO;
        // select(a < 0, 0 - a, a): the form LLVM pattern-matches to
        // `i8x16.abs`.
        simd_select::<simd::m8x16, simd::i8x16>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}

/// Lane-wise wrapping negation of 16 eight-bit integers; written as a
/// multiply by -1, which lowers to the single `i8x16.neg` instruction.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_neg(a: v128) -> v128 {
    unsafe { simd_mul(a.as_i8x16(), simd::i8x16::splat(-1)).v128() }
}

/// Counts the number of set bits in each of the 16 eight-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.popcnt))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.popcnt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_popcnt(v: v128) -> v128 {
    unsafe { simd_ctpop(v.as_i8x16()).v128() }
}

// Popcount is sign-agnostic, so the unsigned name is a re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_popcnt as u8x16_popcnt;

/// Returns `true` if every eight-bit lane of the input is nonzero.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_all_true(a: v128) -> bool {
    unsafe { llvm_i8x16_all_true(a.as_i8x16()) != 0 }
}

// The all-nonzero test is sign-agnostic, so the unsigned name is a re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_all_true as u8x16_all_true;

/// Packs the high (sign) bit of each of the 16 lanes into the low 16 bits of
/// the result, lane 0 in bit 0.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_bitmask(a: v128) -> u16 {
    unsafe { llvm_bitmask_i8x16(a.as_i8x16()) as u16 }
}

// The bitmask extraction is sign-agnostic, so the unsigned name is a
// re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_bitmask as u8x16_bitmask;
2282
/// Narrows two vectors of 8 sixteen-bit integers into a single vector of 16
/// eight-bit integers using signed saturation (`a` supplies the low 8 lanes,
/// `b` the high 8).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.narrow_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_narrow_i16x8(a: v128, b: v128) -> v128 {
    unsafe { llvm_narrow_i8x16_s(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Narrows two vectors of 8 sixteen-bit integers into a single vector of 16
/// eight-bit integers using unsigned saturation.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.narrow_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_narrow_i16x8(a: v128, b: v128) -> v128 {
    unsafe { llvm_narrow_i8x16_u(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Shifts each eight-bit lane left by `amt`; only the low 3 bits of `amt` are
/// used (shift amount taken modulo the lane width of 8).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shl))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shl"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_shl(a: v128, amt: u32) -> v128 {
    unsafe { simd_shl(a.as_i8x16(), simd::i8x16::splat((amt & 0x7) as i8)).v128() }
}

// Left shift is sign-agnostic, so the unsigned name is a re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_shl as u8x16_shl;

/// Arithmetic (sign-preserving) right shift of each eight-bit signed lane by
/// `amt % 8`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shr_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shr_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_i8x16(), simd::i8x16::splat((amt & 0x7) as i8)).v128() }
}

/// Logical (zero-filling) right shift of each eight-bit unsigned lane by
/// `amt % 8`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_u8x16(), simd::u8x16::splat((amt & 0x7) as u8)).v128() }
}
2376
/// Lane-wise wrapping addition of two vectors of 16 eight-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i8x16(), b.as_i8x16()).v128() }
}

// Wrapping addition is sign-agnostic, so the unsigned name is a re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_add as u8x16_add;

/// Lane-wise saturating addition of two vectors of 16 eight-bit signed
/// integers (results clamp to the `i8` range).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise saturating addition of two vectors of 16 eight-bit unsigned
/// integers (results clamp to the `u8` range).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Lane-wise wrapping subtraction of two vectors of 16 eight-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i8x16(), b.as_i8x16()).v128() }
}

// Wrapping subtraction is sign-agnostic, so the unsigned name is a re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_sub as u8x16_sub;

/// Lane-wise saturating subtraction of two vectors of 16 eight-bit signed
/// integers (results clamp to the `i8` range).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise saturating subtraction of two vectors of 16 eight-bit unsigned
/// integers (results clamp to the `u8` range, i.e. floor at zero).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_u8x16(), b.as_u8x16()).v128() }
}
2446
/// Lane-wise signed minimum of two vectors of 16 eight-bit integers
/// (select-on-less-than form, which lowers to `i8x16.min_s`).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_min(a: v128, b: v128) -> v128 {
    let a = a.as_i8x16();
    let b = b.as_i8x16();
    unsafe { simd_select::<simd::i8x16, _>(simd_lt(a, b), a, b).v128() }
}

/// Lane-wise unsigned minimum of two vectors of 16 eight-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_min(a: v128, b: v128) -> v128 {
    let a = a.as_u8x16();
    let b = b.as_u8x16();
    unsafe { simd_select::<simd::i8x16, _>(simd_lt(a, b), a, b).v128() }
}

/// Lane-wise signed maximum of two vectors of 16 eight-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_max(a: v128, b: v128) -> v128 {
    let a = a.as_i8x16();
    let b = b.as_i8x16();
    unsafe { simd_select::<simd::i8x16, _>(simd_gt(a, b), a, b).v128() }
}

/// Lane-wise unsigned maximum of two vectors of 16 eight-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_max(a: v128, b: v128) -> v128 {
    let a = a.as_u8x16();
    let b = b.as_u8x16();
    unsafe { simd_select::<simd::i8x16, _>(simd_gt(a, b), a, b).v128() }
}

/// Lane-wise rounding average of two vectors of 16 eight-bit unsigned
/// integers (the `i8x16.avgr_u` instruction; lane reinterpretation to i8x16
/// is just the intrinsic's declared signature).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.avgr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.avgr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_avgr(a: v128, b: v128) -> v128 {
    unsafe { llvm_avgr_u_i8x16(a.as_i8x16(), b.as_i8x16()).v128() }
}
2508
/// Adds adjacent pairs of eight-bit lanes, sign-extending each sum into the 8
/// sixteen-bit lanes of the result.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extadd_pairwise_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extadd_pairwise_i8x16(a: v128) -> v128 {
    unsafe { llvm_i16x8_extadd_pairwise_i8x16_s(a.as_i8x16()).v128() }
}

/// Adds adjacent pairs of eight-bit lanes, zero-extending each sum into the 8
/// sixteen-bit lanes of the result.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extadd_pairwise_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extadd_pairwise_u8x16(a: v128) -> v128 {
    unsafe { llvm_i16x8_extadd_pairwise_i8x16_u(a.as_i8x16()).v128() }
}

// Alternate name with the unsigned result-type prefix.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extadd_pairwise_u8x16 as u16x8_extadd_pairwise_u8x16;
2533
/// Lane-wise wrapping absolute value of 8 sixteen-bit signed integers:
/// negative lanes are subtracted from zero (`i16::MIN` therefore wraps to
/// itself).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_abs(a: v128) -> v128 {
    let a = a.as_i16x8();
    let zero = simd::i16x8::ZERO;
    unsafe {
        // select(a < 0, 0 - a, a): the form LLVM pattern-matches to
        // `i16x8.abs`.
        simd_select::<simd::m16x8, simd::i16x8>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}

/// Lane-wise wrapping negation of 8 sixteen-bit integers; written as a
/// multiply by -1, which lowers to the single `i16x8.neg` instruction.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_neg(a: v128) -> v128 {
    unsafe { simd_mul(a.as_i16x8(), simd::i16x8::splat(-1)).v128() }
}

/// Lane-wise saturating, rounding Q15 (fixed-point) multiplication of 8
/// sixteen-bit signed integers (`i16x8.q15mulr_sat_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.q15mulr_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.q15mulr_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_q15mulr_sat(a: v128, b: v128) -> v128 {
    unsafe { llvm_q15mulr(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Returns `true` if every sixteen-bit lane of the input is nonzero.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_all_true(a: v128) -> bool {
    unsafe { llvm_i16x8_all_true(a.as_i16x8()) != 0 }
}

// The all-nonzero test is sign-agnostic, so the unsigned name is a re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_all_true as u16x8_all_true;

/// Packs the high (sign) bit of each of the 8 lanes into the low 8 bits of
/// the result, lane 0 in bit 0.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_bitmask(a: v128) -> u8 {
    unsafe { llvm_bitmask_i16x8(a.as_i16x8()) as u8 }
}

// The bitmask extraction is sign-agnostic, so the unsigned name is a
// re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_bitmask as u16x8_bitmask;
2594
/// Narrows two vectors of 4 thirty-two-bit integers into a single vector of 8
/// sixteen-bit integers using signed saturation (`a` supplies the low 4
/// lanes, `b` the high 4).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.narrow_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
    unsafe { llvm_narrow_i16x8_s(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Narrows two vectors of 4 thirty-two-bit integers into a single vector of 8
/// sixteen-bit integers using unsigned saturation.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.narrow_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
    unsafe { llvm_narrow_i16x8_u(a.as_i32x4(), b.as_i32x4()).v128() }
}
2622
2623#[inline]
2626#[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_s))]
2627#[target_feature(enable = "simd128")]
2628#[doc(alias("i16x8.extend_low_i8x16_s"))]
2629#[stable(feature = "wasm_simd", since = "1.54.0")]
2630pub fn i16x8_extend_low_i8x16(a: v128) -> v128 {
2631    unsafe {
2632        simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
2633            a.as_i8x16(),
2634            a.as_i8x16(),
2635            [0, 1, 2, 3, 4, 5, 6, 7],
2636        ))
2637        .v128()
2638    }
2639}
2640
/// Sign-extends the high eight 8-bit lanes of `a` to eight 16-bit lanes
/// (wasm `i16x8.extend_high_i8x16_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_high_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_high_i8x16(a: v128) -> v128 {
    unsafe {
        // Shuffle out the high half, then widen each i8 lane to i16.
        simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ))
        .v128()
    }
}
2658
/// Zero-extends the low eight 8-bit lanes of `a` to eight 16-bit lanes
/// (wasm `i16x8.extend_low_i8x16_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_low_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_low_u8x16(a: v128) -> v128 {
    unsafe {
        // Unsigned lane types make the simd_cast a zero-extension.
        simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ))
        .v128()
    }
}
2676
2677#[stable(feature = "wasm_simd", since = "1.54.0")]
2678pub use i16x8_extend_low_u8x16 as u16x8_extend_low_u8x16;
2679
/// Zero-extends the high eight 8-bit lanes of `a` to eight 16-bit lanes
/// (wasm `i16x8.extend_high_i8x16_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_high_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_high_u8x16(a: v128) -> v128 {
    unsafe {
        // Unsigned lane types make the simd_cast a zero-extension.
        simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ))
        .v128()
    }
}
2697
2698#[stable(feature = "wasm_simd", since = "1.54.0")]
2699pub use i16x8_extend_high_u8x16 as u16x8_extend_high_u8x16;
2700
/// Shifts each 16-bit lane of `a` left by `amt` bits (wasm `i16x8.shl`).
///
/// Only the low 4 bits of `amt` are used (`amt & 0xf`), matching the wasm
/// shift-count semantics and keeping the Rust-level shift in range.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.shl))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.shl"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_shl(a: v128, amt: u32) -> v128 {
    unsafe { simd_shl(a.as_i16x8(), simd::i16x8::splat((amt & 0xf) as i16)).v128() }
}
2715
2716#[stable(feature = "wasm_simd", since = "1.54.0")]
2717pub use i16x8_shl as u16x8_shl;
2718
/// Arithmetic (sign-preserving) right shift of each 16-bit lane of `a` by
/// `amt` bits (wasm `i16x8.shr_s`); only the low 4 bits of `amt` are used.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.shr_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.shr_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_shr(a: v128, amt: u32) -> v128 {
    // Signed lane type makes simd_shr an arithmetic shift.
    unsafe { simd_shr(a.as_i16x8(), simd::i16x8::splat((amt & 0xf) as i16)).v128() }
}
2734
/// Logical (zero-filling) right shift of each 16-bit lane of `a` by `amt`
/// bits (wasm `i16x8.shr_u`); only the low 4 bits of `amt` are used.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.shr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.shr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_shr(a: v128, amt: u32) -> v128 {
    // Unsigned lane type makes simd_shr a logical shift.
    unsafe { simd_shr(a.as_u16x8(), simd::u16x8::splat((amt & 0xf) as u16)).v128() }
}
2750
/// Lane-wise addition of two vectors interpreted as eight 16-bit integers
/// (wasm `i16x8.add`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i16x8(), b.as_i16x8()).v128() }
}
2760
2761#[stable(feature = "wasm_simd", since = "1.54.0")]
2762pub use i16x8_add as u16x8_add;
2763
/// Lane-wise signed saturating addition of eight 16-bit integers
/// (wasm `i16x8.add_sat_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_i16x8(), b.as_i16x8()).v128() }
}
2774
/// Lane-wise unsigned saturating addition of eight 16-bit integers
/// (wasm `i16x8.add_sat_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_u16x8(), b.as_u16x8()).v128() }
}
2785
/// Lane-wise subtraction (`a - b`) of eight 16-bit integers
/// (wasm `i16x8.sub`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i16x8(), b.as_i16x8()).v128() }
}
2795
2796#[stable(feature = "wasm_simd", since = "1.54.0")]
2797pub use i16x8_sub as u16x8_sub;
2798
/// Lane-wise signed saturating subtraction (`a - b`) of eight 16-bit
/// integers (wasm `i16x8.sub_sat_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_i16x8(), b.as_i16x8()).v128() }
}
2809
/// Lane-wise unsigned saturating subtraction (`a - b`) of eight 16-bit
/// integers (wasm `i16x8.sub_sat_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_u16x8(), b.as_u16x8()).v128() }
}
2820
/// Lane-wise multiplication of eight 16-bit integers, keeping the low 16
/// bits of each product (wasm `i16x8.mul`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i16x8(), b.as_i16x8()).v128() }
}
2831
2832#[stable(feature = "wasm_simd", since = "1.54.0")]
2833pub use i16x8_mul as u16x8_mul;
2834
/// Lane-wise minimum of eight 16-bit integers, compared as signed
/// (wasm `i16x8.min_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_min(a: v128, b: v128) -> v128 {
    let a = a.as_i16x8();
    let b = b.as_i16x8();
    // select(a < b, a, b) — LLVM pattern-matches this to the single min instruction.
    unsafe { simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b).v128() }
}
2847
/// Lane-wise minimum of eight 16-bit integers, compared as unsigned
/// (wasm `i16x8.min_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_min(a: v128, b: v128) -> v128 {
    let a = a.as_u16x8();
    let b = b.as_u16x8();
    // The mask type is i16x8 regardless of the (unsigned) element type.
    unsafe { simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b).v128() }
}
2860
/// Lane-wise maximum of eight 16-bit integers, compared as signed
/// (wasm `i16x8.max_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_max(a: v128, b: v128) -> v128 {
    let a = a.as_i16x8();
    let b = b.as_i16x8();
    // select(a > b, a, b) — LLVM pattern-matches this to the single max instruction.
    unsafe { simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b).v128() }
}
2873
/// Lane-wise maximum of eight 16-bit integers, compared as unsigned
/// (wasm `i16x8.max_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_max(a: v128, b: v128) -> v128 {
    let a = a.as_u16x8();
    let b = b.as_u16x8();
    // The mask type is i16x8 regardless of the (unsigned) element type.
    unsafe { simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b).v128() }
}
2886
/// Lane-wise unsigned rounding average (`(a + b + 1) / 2`) of eight 16-bit
/// integers (wasm `i16x8.avgr_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.avgr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.avgr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_avgr(a: v128, b: v128) -> v128 {
    // The llvm intrinsic's signature uses the signed vector type, but the
    // operation itself is the unsigned average.
    unsafe { llvm_avgr_u_i16x8(a.as_i16x8(), b.as_i16x8()).v128() }
}
2896
/// Sign-extends the low eight 8-bit lanes of `a` and `b` to 16 bits and
/// multiplies them lane-wise, producing full 16-bit products
/// (wasm `i16x8.extmul_low_i8x16_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_low_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen each operand's low half first so the multiply happens at 16 bits.
        let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            b.as_i8x16(),
            b.as_i8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        simd_mul(lhs, rhs).v128()
    }
}
2921
/// Sign-extends the high eight 8-bit lanes of `a` and `b` to 16 bits and
/// multiplies them lane-wise, producing full 16-bit products
/// (wasm `i16x8.extmul_high_i8x16_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_high_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen each operand's high half first so the multiply happens at 16 bits.
        let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            b.as_i8x16(),
            b.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        simd_mul(lhs, rhs).v128()
    }
}
2946
/// Zero-extends the low eight 8-bit lanes of `a` and `b` to 16 bits and
/// multiplies them lane-wise, producing full 16-bit products
/// (wasm `i16x8.extmul_low_i8x16_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_low_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_low_u8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Unsigned lane types make the simd_cast a zero-extension.
        let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            b.as_u8x16(),
            b.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        simd_mul(lhs, rhs).v128()
    }
}
2971
2972#[stable(feature = "wasm_simd", since = "1.54.0")]
2973pub use i16x8_extmul_low_u8x16 as u16x8_extmul_low_u8x16;
2974
/// Zero-extends the high eight 8-bit lanes of `a` and `b` to 16 bits and
/// multiplies them lane-wise, producing full 16-bit products
/// (wasm `i16x8.extmul_high_i8x16_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_high_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_high_u8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Unsigned lane types make the simd_cast a zero-extension.
        let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            b.as_u8x16(),
            b.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        simd_mul(lhs, rhs).v128()
    }
}
2999
3000#[stable(feature = "wasm_simd", since = "1.54.0")]
3001pub use i16x8_extmul_high_u8x16 as u16x8_extmul_high_u8x16;
3002
/// Adds adjacent pairs of the eight signed 16-bit lanes of `a`, producing
/// four 32-bit sums (wasm `i32x4.extadd_pairwise_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extadd_pairwise_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extadd_pairwise_i16x8(a: v128) -> v128 {
    unsafe { llvm_i32x4_extadd_pairwise_i16x8_s(a.as_i16x8()).v128() }
}
3013
/// Adds adjacent pairs of the eight unsigned 16-bit lanes of `a`, producing
/// four 32-bit sums (wasm `i32x4.extadd_pairwise_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_u))]
#[doc(alias("i32x4.extadd_pairwise_i16x8_u"))]
#[target_feature(enable = "simd128")]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extadd_pairwise_u16x8(a: v128) -> v128 {
    // The llvm intrinsic's signature uses the signed vector type even for
    // the unsigned (`_u`) pairwise add.
    unsafe { llvm_i32x4_extadd_pairwise_i16x8_u(a.as_i16x8()).v128() }
}
3024
3025#[stable(feature = "wasm_simd", since = "1.54.0")]
3026pub use i32x4_extadd_pairwise_u16x8 as u32x4_extadd_pairwise_u16x8;
3027
/// Lane-wise absolute value of four signed 32-bit integers
/// (wasm `i32x4.abs`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_abs(a: v128) -> v128 {
    let a = a.as_i32x4();
    let zero = simd::i32x4::ZERO;
    unsafe {
        // select(a < 0, 0 - a, a) — pattern-matched to the single abs instruction.
        simd_select::<simd::m32x4, simd::i32x4>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}
3041
/// Lane-wise negation of four 32-bit integers (wasm `i32x4.neg`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_neg(a: v128) -> v128 {
    // Multiplying by -1 (rather than subtracting from zero) is the form that
    // reliably lowers to the single neg instruction.
    unsafe { simd_mul(a.as_i32x4(), simd::i32x4::splat(-1)).v128() }
}
3051
/// Returns `true` if every 32-bit lane of `a` is non-zero
/// (wasm `i32x4.all_true`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_all_true(a: v128) -> bool {
    unsafe { llvm_i32x4_all_true(a.as_i32x4()) != 0 }
}
3061
3062#[stable(feature = "wasm_simd", since = "1.54.0")]
3063pub use i32x4_all_true as u32x4_all_true;
3064
/// Extracts the high (sign) bit of each of the four 32-bit lanes of `a` into
/// the low 4 bits of the result (wasm `i32x4.bitmask`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_bitmask(a: v128) -> u8 {
    // The llvm intrinsic returns i32; only the low 4 bits can be set for 4 lanes.
    unsafe { llvm_bitmask_i32x4(a.as_i32x4()) as u8 }
}
3075
3076#[stable(feature = "wasm_simd", since = "1.54.0")]
3077pub use i32x4_bitmask as u32x4_bitmask;
3078
/// Sign-extends the low four 16-bit lanes of `a` to four 32-bit lanes
/// (wasm `i32x4.extend_low_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_low_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_low_i16x8(a: v128) -> v128 {
    unsafe {
        // Shuffle out the low half, then widen each i16 lane to i32.
        simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [0, 1, 2, 3]
        ))
        .v128()
    }
}
3096
/// Sign-extends the high four 16-bit lanes of `a` to four 32-bit lanes
/// (wasm `i32x4.extend_high_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_high_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_high_i16x8(a: v128) -> v128 {
    unsafe {
        // Shuffle out the high half, then widen each i16 lane to i32.
        simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [4, 5, 6, 7]
        ))
        .v128()
    }
}
3114
/// Zero-extends the low four 16-bit lanes of `a` to four 32-bit lanes
/// (wasm `i32x4.extend_low_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_low_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_low_u16x8(a: v128) -> v128 {
    unsafe {
        // Unsigned lane types make the simd_cast a zero-extension.
        simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [0, 1, 2, 3]
        ))
        .v128()
    }
}
3132
3133#[stable(feature = "wasm_simd", since = "1.54.0")]
3134pub use i32x4_extend_low_u16x8 as u32x4_extend_low_u16x8;
3135
/// Zero-extends the high four 16-bit lanes of `a` to four 32-bit lanes
/// (wasm `i32x4.extend_high_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_high_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_high_u16x8(a: v128) -> v128 {
    unsafe {
        // Unsigned lane types make the simd_cast a zero-extension.
        simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [4, 5, 6, 7]
        ))
        .v128()
    }
}
3153
3154#[stable(feature = "wasm_simd", since = "1.54.0")]
3155pub use i32x4_extend_high_u16x8 as u32x4_extend_high_u16x8;
3156
/// Shifts each 32-bit lane of `a` left by `amt` bits (wasm `i32x4.shl`);
/// only the low 5 bits of `amt` are used (`amt & 0x1f`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.shl))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.shl"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_shl(a: v128, amt: u32) -> v128 {
    unsafe { simd_shl(a.as_i32x4(), simd::i32x4::splat((amt & 0x1f) as i32)).v128() }
}
3171
3172#[stable(feature = "wasm_simd", since = "1.54.0")]
3173pub use i32x4_shl as u32x4_shl;
3174
/// Arithmetic (sign-preserving) right shift of each 32-bit lane of `a` by
/// `amt` bits (wasm `i32x4.shr_s`); only the low 5 bits of `amt` are used.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.shr_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.shr_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_shr(a: v128, amt: u32) -> v128 {
    // Signed lane type makes simd_shr an arithmetic shift.
    unsafe { simd_shr(a.as_i32x4(), simd::i32x4::splat((amt & 0x1f) as i32)).v128() }
}
3190
/// Logical (zero-filling) right shift of each 32-bit lane of `a` by `amt`
/// bits (wasm `i32x4.shr_u`); only the low 5 bits of `amt` are used.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.shr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.shr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_shr(a: v128, amt: u32) -> v128 {
    // Unsigned lane type makes simd_shr a logical shift.
    unsafe { simd_shr(a.as_u32x4(), simd::u32x4::splat(amt & 0x1f)).v128() }
}
3206
/// Lane-wise addition of two vectors interpreted as four 32-bit integers
/// (wasm `i32x4.add`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i32x4(), b.as_i32x4()).v128() }
}
3216
3217#[stable(feature = "wasm_simd", since = "1.54.0")]
3218pub use i32x4_add as u32x4_add;
3219
/// Lane-wise subtraction (`a - b`) of four 32-bit integers
/// (wasm `i32x4.sub`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i32x4(), b.as_i32x4()).v128() }
}
3229
3230#[stable(feature = "wasm_simd", since = "1.54.0")]
3231pub use i32x4_sub as u32x4_sub;
3232
/// Lane-wise multiplication of four 32-bit integers, keeping the low 32
/// bits of each product (wasm `i32x4.mul`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i32x4(), b.as_i32x4()).v128() }
}
3243
3244#[stable(feature = "wasm_simd", since = "1.54.0")]
3245pub use i32x4_mul as u32x4_mul;
3246
/// Lane-wise minimum of four 32-bit integers, compared as signed
/// (wasm `i32x4.min_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_min(a: v128, b: v128) -> v128 {
    let a = a.as_i32x4();
    let b = b.as_i32x4();
    // select(a < b, a, b) — LLVM pattern-matches this to the single min instruction.
    unsafe { simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b).v128() }
}
3259
/// Lane-wise minimum of four 32-bit integers, compared as unsigned
/// (wasm `i32x4.min_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_min(a: v128, b: v128) -> v128 {
    let a = a.as_u32x4();
    let b = b.as_u32x4();
    // The mask type is i32x4 regardless of the (unsigned) element type.
    unsafe { simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b).v128() }
}
3272
/// Lane-wise maximum of four 32-bit integers, compared as signed
/// (wasm `i32x4.max_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_max(a: v128, b: v128) -> v128 {
    let a = a.as_i32x4();
    let b = b.as_i32x4();
    // select(a > b, a, b) — LLVM pattern-matches this to the single max instruction.
    unsafe { simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b).v128() }
}
3285
/// Lane-wise maximum of four 32-bit integers, compared as unsigned
/// (wasm `i32x4.max_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_max(a: v128, b: v128) -> v128 {
    let a = a.as_u32x4();
    let b = b.as_u32x4();
    // The mask type is i32x4 regardless of the (unsigned) element type.
    unsafe { simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b).v128() }
}
3298
/// Multiplies corresponding signed 16-bit lanes of `a` and `b` and adds
/// adjacent pairs of the 32-bit products, yielding four 32-bit lanes
/// (wasm `i32x4.dot_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.dot_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.dot_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_dot_i16x8(a: v128, b: v128) -> v128 {
    unsafe { llvm_i32x4_dot_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() }
}
3309
/// Sign-extends the low four 16-bit lanes of `a` and `b` to 32 bits and
/// multiplies them lane-wise, producing full 32-bit products
/// (wasm `i32x4.extmul_low_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_low_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen each operand's low half first so the multiply happens at 32 bits.
        let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [0, 1, 2, 3]
        ));
        let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            b.as_i16x8(),
            b.as_i16x8(),
            [0, 1, 2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3334
/// Sign-extends the high four 16-bit lanes of `a` and `b` to 32 bits and
/// multiplies them lane-wise, producing full 32-bit products
/// (wasm `i32x4.extmul_high_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_high_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen each operand's high half first so the multiply happens at 32 bits.
        let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [4, 5, 6, 7]
        ));
        let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            b.as_i16x8(),
            b.as_i16x8(),
            [4, 5, 6, 7]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3359
/// Zero-extends the low four 16-bit lanes of `a` and `b` to 32 bits and
/// multiplies them lane-wise, producing full 32-bit products
/// (wasm `i32x4.extmul_low_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_low_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_low_u16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Unsigned lane types make the simd_cast a zero-extension.
        let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [0, 1, 2, 3]
        ));
        let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            b.as_u16x8(),
            b.as_u16x8(),
            [0, 1, 2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3384
3385#[stable(feature = "wasm_simd", since = "1.54.0")]
3386pub use i32x4_extmul_low_u16x8 as u32x4_extmul_low_u16x8;
3387
/// Zero-extends the high four 16-bit lanes of `a` and `b` to 32 bits and
/// multiplies them lane-wise, producing full 32-bit products
/// (wasm `i32x4.extmul_high_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_high_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_high_u16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Unsigned lane types make the simd_cast a zero-extension.
        let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [4, 5, 6, 7]
        ));
        let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            b.as_u16x8(),
            b.as_u16x8(),
            [4, 5, 6, 7]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3412
3413#[stable(feature = "wasm_simd", since = "1.54.0")]
3414pub use i32x4_extmul_high_u16x8 as u32x4_extmul_high_u16x8;
3415
/// Lane-wise absolute value of two signed 64-bit integers
/// (wasm `i64x2.abs`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_abs(a: v128) -> v128 {
    let a = a.as_i64x2();
    let zero = simd::i64x2::ZERO;
    unsafe {
        // select(a < 0, 0 - a, a) — pattern-matched to the single abs instruction.
        simd_select::<simd::m64x2, simd::i64x2>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}
3429
/// Lane-wise negation of two 64-bit integers (wasm `i64x2.neg`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_neg(a: v128) -> v128 {
    // Multiplying by -1 (rather than subtracting from zero) is the form that
    // reliably lowers to the single neg instruction.
    unsafe { simd_mul(a.as_i64x2(), simd::i64x2::splat(-1)).v128() }
}
3439
/// Returns `true` if both 64-bit lanes of `a` are non-zero
/// (wasm `i64x2.all_true`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_all_true(a: v128) -> bool {
    unsafe { llvm_i64x2_all_true(a.as_i64x2()) != 0 }
}
3449
3450#[stable(feature = "wasm_simd", since = "1.54.0")]
3451pub use i64x2_all_true as u64x2_all_true;
3452
/// Extracts the high (sign) bit of each of the two 64-bit lanes of `a` into
/// the low 2 bits of the result (wasm `i64x2.bitmask`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_bitmask(a: v128) -> u8 {
    // The llvm intrinsic returns i32; only the low 2 bits can be set for 2 lanes.
    unsafe { llvm_bitmask_i64x2(a.as_i64x2()) as u8 }
}
3463
3464#[stable(feature = "wasm_simd", since = "1.54.0")]
3465pub use i64x2_bitmask as u64x2_bitmask;
3466
/// Sign-extends the low two 32-bit lanes of `a` to two 64-bit lanes
/// (wasm `i64x2.extend_low_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_low_i32x4(a: v128) -> v128 {
    unsafe {
        // Shuffle out the low half, then widen each i32 lane to i64.
        simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [0, 1]))
            .v128()
    }
}
3480
/// Sign-extends the high two 32-bit lanes of `a` to two 64-bit lanes
/// (wasm `i64x2.extend_high_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_high_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_high_i32x4(a: v128) -> v128 {
    unsafe {
        // Shuffle out the high half, then widen each i32 lane to i64.
        simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [2, 3]))
            .v128()
    }
}
3494
/// Zero-extends the low two 32-bit lanes of `a` to two 64-bit lanes
/// (wasm `i64x2.extend_low_i32x4_u`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_low_u32x4(a: v128) -> v128 {
    unsafe {
        // The unsigned source lane type makes the simd_cast a zero-extension.
        simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [0, 1]))
            .v128()
    }
}
3508
3509#[stable(feature = "wasm_simd", since = "1.54.0")]
3510pub use i64x2_extend_low_u32x4 as u64x2_extend_low_u32x4;
3511
/// Zero-extends the high two 32-bit lanes of `a` to two 64-bit lanes
/// (wasm `i64x2.extend_high_i32x4_u`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_high_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_high_u32x4(a: v128) -> v128 {
    unsafe {
        // The unsigned source lane type makes the simd_cast a zero-extension.
        simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [2, 3]))
            .v128()
    }
}
3525
3526#[stable(feature = "wasm_simd", since = "1.54.0")]
3527pub use i64x2_extend_high_u32x4 as u64x2_extend_high_u32x4;
3528
/// Shifts each 64-bit lane of `a` left by `amt` bits (wasm `i64x2.shl`);
/// only the low 6 bits of `amt` are used (`amt & 0x3f`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.shl))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.shl"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_shl(a: v128, amt: u32) -> v128 {
    unsafe { simd_shl(a.as_i64x2(), simd::i64x2::splat((amt & 0x3f) as i64)).v128() }
}
3543
3544#[stable(feature = "wasm_simd", since = "1.54.0")]
3545pub use i64x2_shl as u64x2_shl;
3546
/// Arithmetic (sign-preserving) right shift of each 64-bit lane of `a` by
/// `amt` bits (wasm `i64x2.shr_s`); only the low 6 bits of `amt` are used.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.shr_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.shr_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_shr(a: v128, amt: u32) -> v128 {
    // Signed lane type makes simd_shr an arithmetic shift.
    unsafe { simd_shr(a.as_i64x2(), simd::i64x2::splat((amt & 0x3f) as i64)).v128() }
}
3562
/// Logical (zero-filling) right shift of each 64-bit lane of `a` by `amt`
/// bits (wasm `i64x2.shr_u`); only the low 6 bits of `amt` are used.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.shr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.shr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u64x2_shr(a: v128, amt: u32) -> v128 {
    // Unsigned lane type makes simd_shr a logical shift.
    unsafe { simd_shr(a.as_u64x2(), simd::u64x2::splat((amt & 0x3f) as u64)).v128() }
}
3578
/// Lane-wise addition of two vectors interpreted as two 64-bit integers
/// (wasm `i64x2.add`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i64x2(), b.as_i64x2()).v128() }
}
3588
3589#[stable(feature = "wasm_simd", since = "1.54.0")]
3590pub use i64x2_add as u64x2_add;
3591
/// Lane-wise subtraction (`a - b`) of two 64-bit integers
/// (wasm `i64x2.sub`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i64x2(), b.as_i64x2()).v128() }
}
3601
3602#[stable(feature = "wasm_simd", since = "1.54.0")]
3603pub use i64x2_sub as u64x2_sub;
3604
/// Lane-wise multiplication of two 64-bit integers, keeping the low 64 bits
/// of each product (wasm `i64x2.mul`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i64x2(), b.as_i64x2()).v128() }
}
3614
3615#[stable(feature = "wasm_simd", since = "1.54.0")]
3616pub use i64x2_mul as u64x2_mul;
3617
/// Sign-extends the low two 32-bit lanes of `a` and `b` to 64 bits and
/// multiplies them lane-wise, producing full 64-bit products
/// (wasm `i64x2.extmul_low_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen each operand's low half first so the multiply happens at 64 bits.
        let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            a.as_i32x4(),
            a.as_i32x4(),
            [0, 1]
        ));
        let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            b.as_i32x4(),
            b.as_i32x4(),
            [0, 1]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3642
/// Sign-extends the high two 32-bit lanes of `a` and `b` to 64 bits and
/// multiplies them lane-wise, producing full 64-bit products
/// (wasm `i64x2.extmul_high_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_high_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen each operand's high half first so the multiply happens at 64 bits.
        let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            a.as_i32x4(),
            a.as_i32x4(),
            [2, 3]
        ));
        let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            b.as_i32x4(),
            b.as_i32x4(),
            [2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3667
/// Zero-extends the low two 32-bit lanes of `a` and `b` to 64 bits and
/// multiplies them lane-wise, producing full 64-bit products
/// (wasm `i64x2.extmul_low_i32x4_u`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_low_u32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Unsigned lane types make the simd_cast a zero-extension.
        let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            a.as_u32x4(),
            a.as_u32x4(),
            [0, 1]
        ));
        let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            b.as_u32x4(),
            b.as_u32x4(),
            [0, 1]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3692
3693#[stable(feature = "wasm_simd", since = "1.54.0")]
3694pub use i64x2_extmul_low_u32x4 as u64x2_extmul_low_u32x4;
3695
/// Zero-extends the high two 32-bit lanes of `a` and `b` to 64 bits and
/// multiplies them lane-wise, producing full 64-bit products
/// (wasm `i64x2.extmul_high_i32x4_u`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_high_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_high_u32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Unsigned lane types make the simd_cast a zero-extension.
        let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            a.as_u32x4(),
            a.as_u32x4(),
            [2, 3]
        ));
        let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            b.as_u32x4(),
            b.as_u32x4(),
            [2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3720
3721#[stable(feature = "wasm_simd", since = "1.54.0")]
3722pub use i64x2_extmul_high_u32x4 as u64x2_extmul_high_u32x4;
3723
/// Lane-wise rounding toward positive infinity of four 32-bit floats
/// (wasm `f32x4.ceil`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ceil))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ceil"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ceil(a: v128) -> v128 {
    unsafe { simd_ceil(a.as_f32x4()).v128() }
}
3733
/// Lane-wise rounding toward negative infinity of four 32-bit floats
/// (wasm `f32x4.floor`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.floor))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.floor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_floor(a: v128) -> v128 {
    unsafe { simd_floor(a.as_f32x4()).v128() }
}
3743
/// Lane-wise rounding toward zero of four 32-bit floats
/// (wasm `f32x4.trunc`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.trunc))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.trunc"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_trunc(a: v128) -> v128 {
    unsafe { simd_trunc(a.as_f32x4()).v128() }
}
3754
/// Lane-wise rounding to the nearest integral value; if two values are equally
/// near, rounds to the even one.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.nearest))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.nearest"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_nearest(a: v128) -> v128 {
    unsafe { llvm_f32x4_nearest(a.as_f32x4()).v128() }
}
3765
/// Calculates the absolute value of each lane of a 128-bit vector interpreted
/// as four 32-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_abs(a: v128) -> v128 {
    unsafe { simd_fabs(a.as_f32x4()).v128() }
}
3776
/// Negates each lane of a 128-bit vector interpreted as four 32-bit floating
/// point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_neg(a: v128) -> v128 {
    unsafe { simd_neg(a.as_f32x4()).v128() }
}
3787
/// Calculates the square root of each lane of a 128-bit vector interpreted as
/// four 32-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.sqrt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.sqrt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_sqrt(a: v128) -> v128 {
    unsafe { simd_fsqrt(a.as_f32x4()).v128() }
}
3798
/// Lane-wise addition of two 128-bit vectors interpreted as four 32-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_f32x4(), b.as_f32x4()).v128() }
}
3809
/// Lane-wise subtraction of two 128-bit vectors interpreted as four 32-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_f32x4(), b.as_f32x4()).v128() }
}
3820
/// Lane-wise multiplication of two 128-bit vectors interpreted as four 32-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_f32x4(), b.as_f32x4()).v128() }
}
3831
/// Lane-wise division of two 128-bit vectors interpreted as four 32-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.div))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.div"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_div(a: v128, b: v128) -> v128 {
    unsafe { simd_div(a.as_f32x4(), b.as_f32x4()).v128() }
}
3842
/// Calculates the lane-wise minimum of two 128-bit vectors interpreted as four
/// 32-bit floating point numbers, using the dedicated `f32x4.min` instruction
/// (NaN handling follows that instruction's semantics; contrast `f32x4_pmin`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.min))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.min"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_min(a: v128, b: v128) -> v128 {
    unsafe { llvm_f32x4_min(a.as_f32x4(), b.as_f32x4()).v128() }
}
3853
/// Calculates the lane-wise maximum of two 128-bit vectors interpreted as four
/// 32-bit floating point numbers, using the dedicated `f32x4.max` instruction
/// (NaN handling follows that instruction's semantics; contrast `f32x4_pmax`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.max))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.max"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_max(a: v128, b: v128) -> v128 {
    unsafe { llvm_f32x4_max(a.as_f32x4(), b.as_f32x4()).v128() }
}
3864
/// Lane-wise pseudo-minimum: each result lane is `if b < a { b } else { a }`,
/// mirroring the comparison-and-select definition of `f32x4.pmin`.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.pmin))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.pmin"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_pmin(a: v128, b: v128) -> v128 {
    unsafe {
        // Select b where b < a, otherwise a — exactly the pmin definition.
        simd_select::<simd::m32x4, simd::f32x4>(
            simd_lt(b.as_f32x4(), a.as_f32x4()),
            b.as_f32x4(),
            a.as_f32x4(),
        )
        .v128()
    }
}
3881
/// Lane-wise pseudo-maximum: each result lane is `if a < b { b } else { a }`,
/// mirroring the comparison-and-select definition of `f32x4.pmax`.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.pmax))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.pmax"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_pmax(a: v128, b: v128) -> v128 {
    unsafe {
        // Select b where a < b, otherwise a — exactly the pmax definition.
        simd_select::<simd::m32x4, simd::f32x4>(
            simd_lt(a.as_f32x4(), b.as_f32x4()),
            b.as_f32x4(),
            a.as_f32x4(),
        )
        .v128()
    }
}
3898
/// Lane-wise rounding to the nearest integral value not smaller than the input.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ceil))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ceil"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ceil(a: v128) -> v128 {
    unsafe { simd_ceil(a.as_f64x2()).v128() }
}
3908
/// Lane-wise rounding to the nearest integral value not greater than the input.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.floor))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.floor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_floor(a: v128) -> v128 {
    unsafe { simd_floor(a.as_f64x2()).v128() }
}
3918
/// Lane-wise rounding to the nearest integral value with the magnitude not
/// larger than the input (rounding toward zero).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.trunc))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.trunc"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_trunc(a: v128) -> v128 {
    unsafe { simd_trunc(a.as_f64x2()).v128() }
}
3929
/// Lane-wise rounding to the nearest integral value; if two values are equally
/// near, rounds to the even one.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.nearest))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.nearest"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_nearest(a: v128) -> v128 {
    unsafe { llvm_f64x2_nearest(a.as_f64x2()).v128() }
}
3940
/// Calculates the absolute value of each lane of a 128-bit vector interpreted
/// as two 64-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_abs(a: v128) -> v128 {
    unsafe { simd_fabs(a.as_f64x2()).v128() }
}
3951
/// Negates each lane of a 128-bit vector interpreted as two 64-bit floating
/// point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_neg(a: v128) -> v128 {
    unsafe { simd_neg(a.as_f64x2()).v128() }
}
3962
/// Calculates the square root of each lane of a 128-bit vector interpreted as
/// two 64-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.sqrt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.sqrt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_sqrt(a: v128) -> v128 {
    unsafe { simd_fsqrt(a.as_f64x2()).v128() }
}
3973
/// Lane-wise addition of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_f64x2(), b.as_f64x2()).v128() }
}
3984
/// Lane-wise subtraction of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_f64x2(), b.as_f64x2()).v128() }
}
3995
/// Lane-wise multiplication of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_f64x2(), b.as_f64x2()).v128() }
}
4006
/// Lane-wise division of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.div))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.div"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_div(a: v128, b: v128) -> v128 {
    unsafe { simd_div(a.as_f64x2(), b.as_f64x2()).v128() }
}
4017
/// Calculates the lane-wise minimum of two 128-bit vectors interpreted as two
/// 64-bit floating point numbers, using the dedicated `f64x2.min` instruction
/// (NaN handling follows that instruction's semantics; contrast `f64x2_pmin`).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.min))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.min"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_min(a: v128, b: v128) -> v128 {
    unsafe { llvm_f64x2_min(a.as_f64x2(), b.as_f64x2()).v128() }
}
4028
/// Calculates the lane-wise maximum of two 128-bit vectors interpreted as two
/// 64-bit floating point numbers, using the dedicated `f64x2.max` instruction
/// (NaN handling follows that instruction's semantics; contrast `f64x2_pmax`).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.max))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.max"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_max(a: v128, b: v128) -> v128 {
    unsafe { llvm_f64x2_max(a.as_f64x2(), b.as_f64x2()).v128() }
}
4039
/// Lane-wise pseudo-minimum: each result lane is `if b < a { b } else { a }`,
/// mirroring the comparison-and-select definition of `f64x2.pmin`.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.pmin))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.pmin"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_pmin(a: v128, b: v128) -> v128 {
    unsafe {
        // Select b where b < a, otherwise a — exactly the pmin definition.
        simd_select::<simd::m64x2, simd::f64x2>(
            simd_lt(b.as_f64x2(), a.as_f64x2()),
            b.as_f64x2(),
            a.as_f64x2(),
        )
        .v128()
    }
}
4056
/// Lane-wise pseudo-maximum: each result lane is `if a < b { b } else { a }`,
/// mirroring the comparison-and-select definition of `f64x2.pmax`.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.pmax))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.pmax"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_pmax(a: v128, b: v128) -> v128 {
    unsafe {
        // Select b where a < b, otherwise a — exactly the pmax definition.
        simd_select::<simd::m64x2, simd::f64x2>(
            simd_lt(a.as_f64x2(), b.as_f64x2()),
            b.as_f64x2(),
            a.as_f64x2(),
        )
        .v128()
    }
}
4073
/// Saturating conversion of the four 32-bit float lanes into four signed
/// 32-bit integers.
///
/// `simd_as` follows Rust's `as` cast semantics for float-to-int conversion:
/// NaN becomes 0 and out-of-range values saturate to `i32::MIN`/`i32::MAX`.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_trunc_sat_f32x4(a: v128) -> v128 {
    unsafe { simd_as::<simd::f32x4, simd::i32x4>(a.as_f32x4()).v128() }
}
4087
/// Saturating conversion of the four 32-bit float lanes into four unsigned
/// 32-bit integers.
///
/// `simd_as` follows Rust's `as` cast semantics for float-to-int conversion:
/// NaN becomes 0 and out-of-range values saturate to `0`/`u32::MAX`.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_trunc_sat_f32x4(a: v128) -> v128 {
    unsafe { simd_as::<simd::f32x4, simd::u32x4>(a.as_f32x4()).v128() }
}
4101
/// Converts the four signed 32-bit integer lanes into four 32-bit floating
/// point lanes.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.convert_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.convert_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_convert_i32x4(a: v128) -> v128 {
    unsafe { simd_cast::<_, simd::f32x4>(a.as_i32x4()).v128() }
}
4112
/// Converts the four unsigned 32-bit integer lanes into four 32-bit floating
/// point lanes.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.convert_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.convert_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_convert_u32x4(a: v128) -> v128 {
    unsafe { simd_cast::<_, simd::f32x4>(a.as_u32x4()).v128() }
}
4123
/// Saturating conversion of the two 64-bit float lanes to two signed 32-bit
/// integer lanes; the two high lanes of the result are zero.
///
/// `simd_as` follows Rust's `as` cast semantics (NaN → 0, out-of-range
/// saturates).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_s_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f64x2_s_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
    let ret: simd::i32x4 = unsafe {
        // Convert the two f64 lanes, then widen back to 4 lanes by shuffling
        // in zeros for the upper half.
        simd_shuffle!(
            simd_as::<simd::f64x2, simd::i32x2>(a.as_f64x2()),
            simd::i32x2::ZERO,
            [0, 1, 2, 3],
        )
    };
    ret.v128()
}
4147
/// Saturating conversion of the two 64-bit float lanes to two unsigned 32-bit
/// integer lanes; the two high lanes of the result are zero.
///
/// `simd_as` follows Rust's `as` cast semantics (NaN → 0, out-of-range
/// saturates).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_u_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f64x2_u_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
    let ret: simd::u32x4 = unsafe {
        // Convert the two f64 lanes, then widen back to 4 lanes by shuffling
        // in zeros for the upper half.
        simd_shuffle!(
            simd_as::<simd::f64x2, simd::u32x2>(a.as_f64x2()),
            simd::u32x2::ZERO,
            [0, 1, 2, 3],
        )
    };
    ret.v128()
}
4171
/// Converts the low two signed 32-bit integer lanes into two 64-bit floating
/// point lanes.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.convert_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_convert_low_i32x4(a: v128) -> v128 {
    unsafe {
        // Select lanes 0 and 1, then widen each i32 to f64 (lossless).
        simd_cast::<simd::i32x2, simd::f64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [0, 1],))
            .v128()
    }
}
4184
/// Converts the low two unsigned 32-bit integer lanes into two 64-bit floating
/// point lanes.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.convert_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_convert_low_u32x4(a: v128) -> v128 {
    unsafe {
        // Select lanes 0 and 1, then widen each u32 to f64 (lossless).
        simd_cast::<simd::u32x2, simd::f64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [0, 1],))
            .v128()
    }
}
4197
/// Demotes the two 64-bit float lanes to 32-bit floats, placing them in the
/// low two lanes of the result; the high two lanes are zero.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.demote_f64x2_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.demote_f64x2_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_demote_f64x2_zero(a: v128) -> v128 {
    unsafe {
        // Concatenate the input with a zero vector to form an f64x4, then
        // narrow every lane to f32 in a single cast.
        simd_cast::<simd::f64x4, simd::f32x4>(simd_shuffle!(
            a.as_f64x2(),
            simd::f64x2::ZERO,
            [0, 1, 2, 3]
        ))
        .v128()
    }
}
4218
4219#[inline]
4222#[cfg_attr(test, assert_instr(f64x2.promote_low_f32x4))]
4223#[target_feature(enable = "simd128")]
4224#[doc(alias("f32x4.promote_low_f32x4"))]
4225#[stable(feature = "wasm_simd", since = "1.54.0")]
4226pub fn f64x2_promote_low_f32x4(a: v128) -> v128 {
4227    unsafe {
4228        simd_cast::<simd::f32x2, simd::f64x2>(simd_shuffle!(a.as_f32x4(), a.as_f32x4(), [0, 1]))
4229            .v128()
4230    }
4231}
4232
4233#[cfg(test)]
4234mod tests {
4235    use super::*;
4236    use core::ops::{Add, Div, Mul, Neg, Sub};
4237
4238    use std::fmt::Debug;
4239    use std::mem::transmute;
4240    use std::num::Wrapping;
4241    use std::prelude::v1::*;
4242
    // Compile-time checks only: each vector constructor must be callable in a
    // `const` context (the constants themselves are never read).
    const _C1: v128 = i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    const _C2: v128 = u8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    const _C3: v128 = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
    const _C4: v128 = u16x8(0, 1, 2, 3, 4, 5, 6, 7);
    const _C5: v128 = i32x4(0, 1, 2, 3);
    const _C6: v128 = u32x4(0, 1, 2, 3);
    const _C7: v128 = i64x2(0, 1);
    const _C8: v128 = u64x2(0, 1);
    const _C9: v128 = f32x4(0.0, 1.0, 2.0, 3.0);
    const _C10: v128 = f64x2(0.0, 1.0);
4253
4254    fn compare_bytes(a: v128, b: v128) {
4255        let a: [u8; 16] = unsafe { transmute(a) };
4256        let b: [u8; 16] = unsafe { transmute(b) };
4257        assert_eq!(a, b);
4258    }
4259
4260    #[test]
4261    fn test_load() {
4262        unsafe {
4263            let arr: [i32; 4] = [0, 1, 2, 3];
4264            let vec = v128_load(arr.as_ptr() as *const v128);
4265            compare_bytes(vec, i32x4(0, 1, 2, 3));
4266        }
4267    }
4268
    #[test]
    fn test_load_extend() {
        // Each *_load_extend_* reads 64 bits of memory and widens the
        // elements into full lanes, sign- or zero-extending per the source
        // element type.
        unsafe {
            let arr: [i8; 8] = [-3, -2, -1, 0, 1, 2, 3, 4];
            let vec = i16x8_load_extend_i8x8(arr.as_ptr());
            compare_bytes(vec, i16x8(-3, -2, -1, 0, 1, 2, 3, 4));
            // Same bytes, zero-extended as u8: -3i8 reinterprets as 253, etc.
            let vec = i16x8_load_extend_u8x8(arr.as_ptr() as *const u8);
            compare_bytes(vec, i16x8(253, 254, 255, 0, 1, 2, 3, 4));

            let arr: [i16; 4] = [-1, 0, 1, 2];
            let vec = i32x4_load_extend_i16x4(arr.as_ptr());
            compare_bytes(vec, i32x4(-1, 0, 1, 2));
            // -1i16 zero-extends to 65535.
            let vec = i32x4_load_extend_u16x4(arr.as_ptr() as *const u16);
            compare_bytes(vec, i32x4(65535, 0, 1, 2));

            let arr: [i32; 2] = [-1, 1];
            let vec = i64x2_load_extend_i32x2(arr.as_ptr());
            compare_bytes(vec, i64x2(-1, 1));
            // -1i32 zero-extends to u32::MAX.
            let vec = i64x2_load_extend_u32x2(arr.as_ptr() as *const u32);
            compare_bytes(vec, i64x2(u32::max_value().into(), 1));
        }
    }
4291
4292    #[test]
4293    fn test_load_splat() {
4294        unsafe {
4295            compare_bytes(v128_load8_splat(&8), i8x16_splat(8));
4296            compare_bytes(v128_load16_splat(&9), i16x8_splat(9));
4297            compare_bytes(v128_load32_splat(&10), i32x4_splat(10));
4298            compare_bytes(v128_load64_splat(&11), i64x2_splat(11));
4299        }
4300    }
4301
4302    #[test]
4303    fn test_load_zero() {
4304        unsafe {
4305            compare_bytes(v128_load32_zero(&10), i32x4(10, 0, 0, 0));
4306            compare_bytes(v128_load64_zero(&11), i64x2(11, 0));
4307        }
4308    }
4309
4310    #[test]
4311    fn test_store() {
4312        unsafe {
4313            let mut spot = i8x16_splat(0);
4314            v128_store(&mut spot, i8x16_splat(1));
4315            compare_bytes(spot, i8x16_splat(1));
4316        }
4317    }
4318
4319    #[test]
4320    fn test_load_lane() {
4321        unsafe {
4322            let zero = i8x16_splat(0);
4323            compare_bytes(
4324                v128_load8_lane::<2>(zero, &1),
4325                i8x16_replace_lane::<2>(zero, 1),
4326            );
4327
4328            compare_bytes(
4329                v128_load16_lane::<2>(zero, &1),
4330                i16x8_replace_lane::<2>(zero, 1),
4331            );
4332
4333            compare_bytes(
4334                v128_load32_lane::<2>(zero, &1),
4335                i32x4_replace_lane::<2>(zero, 1),
4336            );
4337
4338            compare_bytes(
4339                v128_load64_lane::<1>(zero, &1),
4340                i64x2_replace_lane::<1>(zero, 1),
4341            );
4342        }
4343    }
4344
4345    #[test]
4346    fn test_store_lane() {
4347        unsafe {
4348            let mut spot = 0;
4349            let zero = i8x16_splat(0);
4350            v128_store8_lane::<5>(i8x16_replace_lane::<5>(zero, 7), &mut spot);
4351            assert_eq!(spot, 7);
4352
4353            let mut spot = 0;
4354            v128_store16_lane::<5>(i16x8_replace_lane::<5>(zero, 7), &mut spot);
4355            assert_eq!(spot, 7);
4356
4357            let mut spot = 0;
4358            v128_store32_lane::<3>(i32x4_replace_lane::<3>(zero, 7), &mut spot);
4359            assert_eq!(spot, 7);
4360
4361            let mut spot = 0;
4362            v128_store64_lane::<0>(i64x2_replace_lane::<0>(zero, 7), &mut spot);
4363            assert_eq!(spot, 7);
4364        }
4365    }
4366
    #[test]
    fn test_i8x16() {
        // Constructors must be usable in const position and must round-trip
        // their (possibly negative) lane values exactly.
        const A: v128 = super::i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        compare_bytes(A, A);

        const _: v128 = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
        const _: v128 = i32x4(0, 1, 2, 3);
        const _: v128 = i64x2(0, 1);
        const _: v128 = f32x4(0., 1., 2., 3.);
        const _: v128 = f64x2(0., 1.);

        // Negative values survive the trip through v128 and back.
        let bytes: [i16; 8] = unsafe { mem::transmute(i16x8(-1, -2, -3, -4, -5, -6, -7, -8)) };
        assert_eq!(bytes, [-1, -2, -3, -4, -5, -6, -7, -8]);
        let bytes: [i8; 16] = unsafe {
            mem::transmute(i8x16(
                -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16,
            ))
        };
        assert_eq!(
            bytes,
            [
                -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16
            ]
        );
    }
4392
    #[test]
    fn test_shuffle() {
        // Interleave lanes from two vectors at every lane width; shuffle
        // indices 0..N pick from the first operand, N..2N from the second.
        let vec_a = i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        let vec_b = i8x16(
            16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
        );

        let vec_r = i8x16_shuffle::<0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30>(
            vec_a, vec_b,
        );
        let vec_e = i8x16(0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
        compare_bytes(vec_r, vec_e);

        let vec_a = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
        let vec_b = i16x8(8, 9, 10, 11, 12, 13, 14, 15);
        let vec_r = i16x8_shuffle::<0, 8, 2, 10, 4, 12, 6, 14>(vec_a, vec_b);
        let vec_e = i16x8(0, 8, 2, 10, 4, 12, 6, 14);
        compare_bytes(vec_r, vec_e);

        let vec_a = i32x4(0, 1, 2, 3);
        let vec_b = i32x4(4, 5, 6, 7);
        let vec_r = i32x4_shuffle::<0, 4, 2, 6>(vec_a, vec_b);
        let vec_e = i32x4(0, 4, 2, 6);
        compare_bytes(vec_r, vec_e);

        let vec_a = i64x2(0, 1);
        let vec_b = i64x2(2, 3);
        let vec_r = i64x2_shuffle::<0, 2>(vec_a, vec_b);
        let vec_e = i64x2(0, 2);
        compare_bytes(vec_r, vec_e);
    }
4424
    // Generates a test that exercises a matched extract/replace lane pair:
    // extracting from a splatted vector, extracting each distinct lane, and
    // verifying that a replaced lane reads back the new value.
    macro_rules! test_extract {
        (
            name: $test_id:ident,
            extract: $extract:ident,
            replace: $replace:ident,
            elem: $elem:ty,
            count: $count:expr,
            indices: [$($idx:expr),*],
        ) => {
            #[test]
            fn $test_id() {
                unsafe {
                    // All lanes identical: every index extracts the same value.
                    let arr: [$elem; $count] = [123 as $elem; $count];
                    let vec: v128 = transmute(arr);
                    $(
                        assert_eq!($extract::<$idx>(vec), 123 as $elem);
                    )*

                    // Distinct lanes: lane i holds the value i.
                    let arr: [$elem; $count] = [$($idx as $elem),*];
                    let vec: v128 = transmute(arr);
                    $(
                        assert_eq!($extract::<$idx>(vec), $idx as $elem);

                        // Replacing a lane must be observable via extract.
                        let tmp = $replace::<$idx>(vec, 124 as $elem);
                        assert_eq!($extract::<$idx>(tmp), 124 as $elem);
                    )*
                }
            }
        }
    }
4458
    // Instantiate the extract/replace test for every lane type and width.
    test_extract! {
        name: test_i8x16_extract_replace,
        extract: i8x16_extract_lane,
        replace: i8x16_replace_lane,
        elem: i8,
        count: 16,
        indices: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
    }
    test_extract! {
        name: test_i16x8_extract_replace,
        extract: i16x8_extract_lane,
        replace: i16x8_replace_lane,
        elem: i16,
        count: 8,
        indices: [0, 1, 2, 3, 4, 5, 6, 7],
    }
    test_extract! {
        name: test_i32x4_extract_replace,
        extract: i32x4_extract_lane,
        replace: i32x4_replace_lane,
        elem: i32,
        count: 4,
        indices: [0, 1, 2, 3],
    }
    test_extract! {
        name: test_i64x2_extract_replace,
        extract: i64x2_extract_lane,
        replace: i64x2_replace_lane,
        elem: i64,
        count: 2,
        indices: [0, 1],
    }
    test_extract! {
        name: test_f32x4_extract_replace,
        extract: f32x4_extract_lane,
        replace: f32x4_replace_lane,
        elem: f32,
        count: 4,
        indices: [0, 1, 2, 3],
    }
    test_extract! {
        name: test_f64x2_extract_replace,
        extract: f64x2_extract_lane,
        replace: f64x2_replace_lane,
        elem: f64,
        count: 2,
        indices: [0, 1],
    }
4507
    #[test]
    #[rustfmt::skip]
    fn test_swizzle() {
        // Swizzle selects bytes of the first operand by the byte indices in
        // the second; indices >= 16 (first row below) select zero.
        compare_bytes(
            i8x16_swizzle(
                i32x4(1, 2, 3, 4),
                i8x16(
                    32, 31, 30, 29,
                    0, 1, 2, 3,
                    12, 13, 14, 15,
                    0, 4, 8, 12),
            ),
            i32x4(0, 1, 4, 0x04030201),
        );
    }
4523
    // Generates a test asserting that splatting `$val` produces exactly the
    // listed 16-byte little-endian representation.
    macro_rules! test_splat {
        ($test_id:ident: $val:expr => $($vals:expr),*) => {
            #[test]
            fn $test_id() {
                let a = super::$test_id($val);
                let b = u8x16($($vals as u8),*);
                compare_bytes(a, b);
            }
        }
    }
4534
    // Expected byte patterns below are the little-endian encodings of the
    // splatted value repeated across the vector (e.g. 42.0f32 encodes as the
    // bytes 0, 0, 40, 66).
    mod splats {
        use super::*;
        test_splat!(i8x16_splat: 42 => 42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42);
        test_splat!(i16x8_splat: 42 => 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0);
        test_splat!(i32x4_splat: 42 => 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0);
        test_splat!(i64x2_splat: 42 => 42, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0);
        test_splat!(f32x4_splat: 42. => 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66);
        test_splat!(f64x2_splat: 42. => 0, 0, 0, 0, 0, 0, 69, 64, 0, 0, 0, 0, 0, 0, 69, 64);
    }
4544
    #[test]
    fn test_bitmasks() {
        // Bitmask collects each lane's sign bit into one integer, so only the
        // top bit of a lane matters (MAX → 0, MIN → all-ones mask).
        let zero = i8x16_splat(0);
        let ones = i8x16_splat(!0);

        assert_eq!(i8x16_bitmask(zero), 0);
        assert_eq!(i8x16_bitmask(ones), 0xffff);
        assert_eq!(i8x16_bitmask(i8x16_splat(i8::MAX)), 0);
        assert_eq!(i8x16_bitmask(i8x16_splat(i8::MIN)), 0xffff);
        assert_eq!(i8x16_bitmask(i8x16_replace_lane::<1>(zero, -1)), 0b10);

        assert_eq!(i16x8_bitmask(zero), 0);
        assert_eq!(i16x8_bitmask(ones), 0xff);
        assert_eq!(i16x8_bitmask(i16x8_splat(i16::MAX)), 0);
        assert_eq!(i16x8_bitmask(i16x8_splat(i16::MIN)), 0xff);
        assert_eq!(i16x8_bitmask(i16x8_replace_lane::<1>(zero, -1)), 0b10);

        assert_eq!(i32x4_bitmask(zero), 0);
        assert_eq!(i32x4_bitmask(ones), 0b1111);
        assert_eq!(i32x4_bitmask(i32x4_splat(i32::MAX)), 0);
        assert_eq!(i32x4_bitmask(i32x4_splat(i32::MIN)), 0b1111);
        assert_eq!(i32x4_bitmask(i32x4_replace_lane::<1>(zero, -1)), 0b10);

        assert_eq!(i64x2_bitmask(zero), 0);
        assert_eq!(i64x2_bitmask(ones), 0b11);
        assert_eq!(i64x2_bitmask(i64x2_splat(i64::MAX)), 0);
        assert_eq!(i64x2_bitmask(i64x2_splat(i64::MIN)), 0b11);
        assert_eq!(i64x2_bitmask(i64x2_replace_lane::<1>(zero, -1)), 0b10);
    }
4574
    #[test]
    fn test_narrow() {
        // Narrowing converts wider lanes to narrower ones with saturation:
        // signed narrowing clamps to the signed range, unsigned narrowing
        // clamps negatives to 0 and large values to the unsigned max.
        let zero = i8x16_splat(0);
        let ones = i8x16_splat(!0);

        compare_bytes(i8x16_narrow_i16x8(zero, zero), zero);
        compare_bytes(u8x16_narrow_i16x8(zero, zero), zero);
        compare_bytes(i8x16_narrow_i16x8(ones, ones), ones);
        // All-ones i16 lanes are -1, which saturate to 0 when narrowed unsigned.
        compare_bytes(u8x16_narrow_i16x8(ones, ones), zero);

        compare_bytes(
            i8x16_narrow_i16x8(
                i16x8(
                    0,
                    1,
                    2,
                    -1,
                    i8::MIN.into(),
                    i8::MAX.into(),
                    u8::MIN.into(),
                    u8::MAX.into(),
                ),
                i16x8(
                    i16::MIN,
                    i16::MAX,
                    u16::MIN as i16,
                    u16::MAX as i16,
                    0,
                    0,
                    0,
                    0,
                ),
            ),
            i8x16(0, 1, 2, -1, -128, 127, 0, 127, -128, 127, 0, -1, 0, 0, 0, 0),
        );

        compare_bytes(
            u8x16_narrow_i16x8(
                i16x8(
                    0,
                    1,
                    2,
                    -1,
                    i8::MIN.into(),
                    i8::MAX.into(),
                    u8::MIN.into(),
                    u8::MAX.into(),
                ),
                i16x8(
                    i16::MIN,
                    i16::MAX,
                    u16::MIN as i16,
                    u16::MAX as i16,
                    0,
                    0,
                    0,
                    0,
                ),
            ),
            i8x16(0, 1, 2, 0, 0, 127, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0),
        );

        compare_bytes(i16x8_narrow_i32x4(zero, zero), zero);
        compare_bytes(u16x8_narrow_i32x4(zero, zero), zero);
        compare_bytes(i16x8_narrow_i32x4(ones, ones), ones);
        compare_bytes(u16x8_narrow_i32x4(ones, ones), zero);

        compare_bytes(
            i16x8_narrow_i32x4(
                i32x4(0, -1, i16::MIN.into(), i16::MAX.into()),
                i32x4(i32::MIN, i32::MAX, u32::MIN as i32, u32::MAX as i32),
            ),
            i16x8(0, -1, i16::MIN, i16::MAX, i16::MIN, i16::MAX, 0, -1),
        );

        compare_bytes(
            u16x8_narrow_i32x4(
                i32x4(u16::MAX.into(), -1, i16::MIN.into(), i16::MAX.into()),
                i32x4(i32::MIN, i32::MAX, u32::MIN as i32, u32::MAX as i32),
            ),
            i16x8(-1, 0, 0, i16::MAX, 0, -1, 0, 0),
        );
    }
4658
    #[test]
    fn test_extend() {
        // Extending widens the low or high half's lanes: signed extension
        // keeps -1 lanes as all-ones, while unsigned extension of all-ones
        // yields the narrower type's MAX in each widened lane.
        let zero = i8x16_splat(0);
        let ones = i8x16_splat(!0);

        compare_bytes(i16x8_extend_low_i8x16(zero), zero);
        compare_bytes(i16x8_extend_high_i8x16(zero), zero);
        compare_bytes(i16x8_extend_low_u8x16(zero), zero);
        compare_bytes(i16x8_extend_high_u8x16(zero), zero);
        compare_bytes(i16x8_extend_low_i8x16(ones), ones);
        compare_bytes(i16x8_extend_high_i8x16(ones), ones);
        let halves = u16x8_splat(u8::MAX.into());
        compare_bytes(i16x8_extend_low_u8x16(ones), halves);
        compare_bytes(i16x8_extend_high_u8x16(ones), halves);

        compare_bytes(i32x4_extend_low_i16x8(zero), zero);
        compare_bytes(i32x4_extend_high_i16x8(zero), zero);
        compare_bytes(i32x4_extend_low_u16x8(zero), zero);
        compare_bytes(i32x4_extend_high_u16x8(zero), zero);
        compare_bytes(i32x4_extend_low_i16x8(ones), ones);
        compare_bytes(i32x4_extend_high_i16x8(ones), ones);
        let halves = u32x4_splat(u16::MAX.into());
        compare_bytes(i32x4_extend_low_u16x8(ones), halves);
        compare_bytes(i32x4_extend_high_u16x8(ones), halves);

        compare_bytes(i64x2_extend_low_i32x4(zero), zero);
        compare_bytes(i64x2_extend_high_i32x4(zero), zero);
        compare_bytes(i64x2_extend_low_u32x4(zero), zero);
        compare_bytes(i64x2_extend_high_u32x4(zero), zero);
        compare_bytes(i64x2_extend_low_i32x4(ones), ones);
        compare_bytes(i64x2_extend_high_i32x4(ones), ones);
        let halves = i64x2_splat(u32::MAX.into());
        compare_bytes(u64x2_extend_low_u32x4(ones), halves);
        compare_bytes(u64x2_extend_high_u32x4(ones), halves);
    }
4694
4695    #[test]
4696    fn test_dot() {
4697        let zero = i8x16_splat(0);
4698        let ones = i8x16_splat(!0);
4699        let two = i32x4_splat(2);
4700        compare_bytes(i32x4_dot_i16x8(zero, zero), zero);
4701        compare_bytes(i32x4_dot_i16x8(ones, ones), two);
4702    }
4703
    // Generates a test that applies a SIMD binary op `$f` to pairs of arrays
    // and checks each lane against the scalar operation `$op`.
    macro_rules! test_binop {
        (
            $($name:ident => {
                $([$($vec1:tt)*] ($op:ident | $f:ident) [$($vec2:tt)*],)*
            })*
        ) => ($(
            #[test]
            fn $name() {
                unsafe {
                    $(
                        let v1 = [$($vec1)*];
                        let v2 = [$($vec2)*];
                        let v1_v128: v128 = mem::transmute(v1);
                        let v2_v128: v128 = mem::transmute(v2);
                        let v3_v128 = super::$f(v1_v128, v2_v128);
                        // Initialize v3 with the same shape/type as v1 so the
                        // transmute below has a concrete target type.
                        let mut v3 = [$($vec1)*];
                        let _ignore = v3;
                        v3 = mem::transmute(v3_v128);

                        // Compare every lane against the scalar reference op.
                        for (i, actual) in v3.iter().enumerate() {
                            let expected = v1[i].$op(v2[i]);
                            assert_eq!(*actual, expected);
                        }
                    )*
                }
            }
        )*)
    }
4732
    // Unary-operator analogue of `test_binop!`: each case applies a simd
    // intrinsic to one transmuted input array and checks every result lane
    // against the paired scalar method on the corresponding input lane.
    macro_rules! test_unop {
        (
            $($name:ident => {
                $(($op:ident | $f:ident) [$($vec1:tt)*],)*
            })*
        ) => ($(
            #[test]
            fn $name() {
                unsafe {
                    $(
                        let v1 = [$($vec1)*];
                        let v1_v128: v128 = mem::transmute(v1);
                        let v2_v128 = super::$f(v1_v128);
                        // Seed `v2` from the input tokens purely to pin its
                        // array type for the transmute; `_ignore` suppresses
                        // the unused-assignment lint on the seed value.
                        let mut v2 = [$($vec1)*];
                        let _ignore = v2;
                        v2 = mem::transmute(v2_v128);

                        for (i, actual) in v2.iter().enumerate() {
                            let expected = v1[i].$op();
                            assert_eq!(*actual, expected);
                        }
                    )*
                }
            }
        )*)
    }
4759
4760    trait Avgr: Sized {
4761        fn avgr(self, other: Self) -> Self;
4762    }
4763
4764    macro_rules! impl_avgr {
4765        ($($i:ident)*) => ($(impl Avgr for $i {
4766            fn avgr(self, other: Self) -> Self {
4767                ((self as u64 + other as u64 + 1) / 2) as $i
4768            }
4769        })*)
4770    }
4771
4772    impl_avgr!(u8 u16);
4773
    // Lane-wise binary-operator coverage: each arm pairs a wasm intrinsic
    // with the scalar Rust method that defines its per-lane behavior (e.g.
    // `i8x16_add` vs `i8::wrapping_add`). Inputs cover zeros, saturation
    // boundaries (MIN/MAX lanes), and arbitrary mixed-sign lanes; unsigned
    // cases reuse the signed byte patterns via `as u8`/`as u16` casts.
    test_binop! {
        test_i8x16_add => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_add | i8x16_add)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (wrapping_add | i8x16_add)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (wrapping_add | i8x16_add)
            [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
        }

        test_i8x16_add_sat_s => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (saturating_add | i8x16_add_sat)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_add | i8x16_add_sat)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_add | i8x16_add_sat)
            [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
        }

        test_i8x16_add_sat_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (saturating_add | u8x16_add_sat)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_add | u8x16_add_sat)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_add | u8x16_add_sat)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        test_i8x16_sub => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_sub | i8x16_sub)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (wrapping_sub | i8x16_sub)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (wrapping_sub | i8x16_sub)
            [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
        }

        test_i8x16_sub_sat_s => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (saturating_sub | i8x16_sub_sat)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_sub | i8x16_sub_sat)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_sub | i8x16_sub_sat)
            [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
        }

        test_i8x16_sub_sat_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (saturating_sub | u8x16_sub_sat)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_sub | u8x16_sub_sat)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_sub | u8x16_sub_sat)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        test_i8x16_min_s => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (min | i8x16_min)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (min | i8x16_min)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (min | i8x16_min)
            [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
        }

        test_i8x16_min_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (min | u8x16_min)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (min | u8x16_min)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (min | u8x16_min)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        test_i8x16_max_s => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (max | i8x16_max)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (max | i8x16_max)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (max | i8x16_max)
            [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
        }

        test_i8x16_max_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (max | u8x16_max)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (max | u8x16_max)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (max | u8x16_max)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        test_i8x16_avgr_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (avgr | u8x16_avgr)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (avgr | u8x16_avgr)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (avgr | u8x16_avgr)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        // 16-bit lanes: same operator families on 8-lane vectors.
        test_i16x8_add => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_add | i16x8_add)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (wrapping_add | i16x8_add)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_add_sat_s => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (saturating_add | i16x8_add_sat)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (saturating_add | i16x8_add_sat)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_add_sat_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (saturating_add | u16x8_add_sat)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (saturating_add | u16x8_add_sat)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        test_i16x8_sub => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_sub | i16x8_sub)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (wrapping_sub | i16x8_sub)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_sub_sat_s => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (saturating_sub | i16x8_sub_sat)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (saturating_sub | i16x8_sub_sat)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_sub_sat_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (saturating_sub | u16x8_sub_sat)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (saturating_sub | u16x8_sub_sat)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        test_i16x8_mul => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_mul | i16x8_mul)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (wrapping_mul | i16x8_mul)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_min_s => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (min | i16x8_min)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (min | i16x8_min)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_min_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (min | u16x8_min)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (min | u16x8_min)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        test_i16x8_max_s => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (max | i16x8_max)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (max | i16x8_max)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_max_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (max | u16x8_max)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (max | u16x8_max)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        test_i16x8_avgr_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (avgr | u16x8_avgr)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (avgr | u16x8_avgr)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        // 32-bit lanes.
        test_i32x4_add => {
            [0i32, 0, 0, 0] (wrapping_add | i32x4_add) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (wrapping_add | i32x4_add)
            [i32::MAX; 4],
        }

        test_i32x4_sub => {
            [0i32, 0, 0, 0] (wrapping_sub | i32x4_sub) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (wrapping_sub | i32x4_sub)
            [i32::MAX; 4],
        }

        test_i32x4_mul => {
            [0i32, 0, 0, 0] (wrapping_mul | i32x4_mul) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (wrapping_mul | i32x4_mul)
            [i32::MAX; 4],
        }

        test_i32x4_min_s => {
            [0i32, 0, 0, 0] (min | i32x4_min) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (min | i32x4_min)
            [i32::MAX; 4],
        }

        test_i32x4_min_u => {
            [0u32, 0, 0, 0] (min | u32x4_min) [1, 2, 3, 4],
            [1u32, 1283, i32::MAX as u32, i32::MIN as u32]
                (min | u32x4_min)
            [i32::MAX as u32; 4],
        }

        test_i32x4_max_s => {
            [0i32, 0, 0, 0] (max | i32x4_max) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (max | i32x4_max)
            [i32::MAX; 4],
        }

        test_i32x4_max_u => {
            [0u32, 0, 0, 0] (max | u32x4_max) [1, 2, 3, 4],
            [1u32, 1283, i32::MAX as u32, i32::MIN as u32]
                (max | u32x4_max)
            [i32::MAX as u32; 4],
        }

        // 64-bit lanes.
        test_i64x2_add => {
            [0i64, 0] (wrapping_add | i64x2_add) [1, 2],
            [i64::MIN, i64::MAX] (wrapping_add | i64x2_add) [i64::MAX, i64::MIN],
            [i64::MAX; 2] (wrapping_add | i64x2_add) [i64::MAX; 2],
            [-4i64, -4] (wrapping_add | i64x2_add) [800, 939],
        }

        test_i64x2_sub => {
            [0i64, 0] (wrapping_sub | i64x2_sub) [1, 2],
            [i64::MIN, i64::MAX] (wrapping_sub | i64x2_sub) [i64::MAX, i64::MIN],
            [i64::MAX; 2] (wrapping_sub | i64x2_sub) [i64::MAX; 2],
            [-4i64, -4] (wrapping_sub | i64x2_sub) [800, 939],
        }

        test_i64x2_mul => {
            [0i64, 0] (wrapping_mul | i64x2_mul) [1, 2],
            [i64::MIN, i64::MAX] (wrapping_mul | i64x2_mul) [i64::MAX, i64::MIN],
            [i64::MAX; 2] (wrapping_mul | i64x2_mul) [i64::MAX; 2],
            [-4i64, -4] (wrapping_mul | i64x2_mul) [800, 939],
        }

        // Float lanes: inputs include infinities and signed zero; NaN-aware
        // comparison is handled by the scalar methods themselves here.
        test_f32x4_add => {
            [-1.0f32, 2.0, 3.0, 4.0] (add | f32x4_add) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (add | f32x4_add)
            [1., 2., 0., 0.],
        }

        test_f32x4_sub => {
            [-1.0f32, 2.0, 3.0, 4.0] (sub | f32x4_sub) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (sub | f32x4_sub)
            [1., 2., 0., 0.],
        }

        test_f32x4_mul => {
            [-1.0f32, 2.0, 3.0, 4.0] (mul | f32x4_mul) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (mul | f32x4_mul)
            [1., 2., 1., 0.],
        }

        test_f32x4_div => {
            [-1.0f32, 2.0, 3.0, 4.0] (div | f32x4_div) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (div | f32x4_div)
            [1., 2., 0., 0.],
        }

        test_f32x4_min => {
            [-1.0f32, 2.0, 3.0, 4.0] (min | f32x4_min) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (min | f32x4_min)
            [1., 2., 0., 0.],
        }

        test_f32x4_max => {
            [-1.0f32, 2.0, 3.0, 4.0] (max | f32x4_max) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (max | f32x4_max)
            [1., 2., 0., 0.],
        }

        test_f32x4_pmin => {
            [-1.0f32, 2.0, 3.0, 4.0] (min | f32x4_pmin) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (min | f32x4_pmin)
            [1., 2., 0., 0.],
        }

        test_f32x4_pmax => {
            [-1.0f32, 2.0, 3.0, 4.0] (max | f32x4_pmax) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (max | f32x4_pmax)
            [1., 2., 0., 0.],
        }

        test_f64x2_add => {
            [-1.0f64, 2.0] (add | f64x2_add) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (add | f64x2_add) [1., 2.],
        }

        test_f64x2_sub => {
            [-1.0f64, 2.0] (sub | f64x2_sub) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (sub | f64x2_sub) [1., 2.],
        }

        test_f64x2_mul => {
            [-1.0f64, 2.0] (mul | f64x2_mul) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (mul | f64x2_mul) [1., 2.],
        }

        test_f64x2_div => {
            [-1.0f64, 2.0] (div | f64x2_div) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (div | f64x2_div) [1., 2.],
        }

        test_f64x2_min => {
            [-1.0f64, 2.0] (min | f64x2_min) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (min | f64x2_min) [1., 2.],
        }

        test_f64x2_max => {
            [-1.0f64, 2.0] (max | f64x2_max) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (max | f64x2_max) [1., 2.],
        }

        test_f64x2_pmin => {
            [-1.0f64, 2.0] (min | f64x2_pmin) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (min | f64x2_pmin) [1., 2.],
        }

        test_f64x2_pmax => {
            [-1.0f64, 2.0] (max | f64x2_pmax) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (max | f64x2_pmax) [1., 2.],
        }
    }
5215
    // Lane-wise unary-operator coverage: each arm pairs a wasm intrinsic
    // with the defining scalar method (`wrapping_abs`/`wrapping_neg` for
    // integer lanes — note `i8x16_abs` of -128 stays -128 under wrapping —
    // and `ceil`/`floor`/`trunc`/`round`/`abs`/`neg`/`sqrt` for floats).
    test_unop! {
        test_i8x16_abs => {
            (wrapping_abs | i8x16_abs)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            (wrapping_abs | i8x16_abs)
            [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            (wrapping_abs | i8x16_abs)
            [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
        }

        test_i8x16_neg => {
            (wrapping_neg | i8x16_neg)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            (wrapping_neg | i8x16_neg)
            [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            (wrapping_neg | i8x16_neg)
            [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
        }

        test_i16x8_abs => {
            (wrapping_abs | i16x8_abs) [1i16, 1, 1, 1, 1, 1, 1, 1],
            (wrapping_abs | i16x8_abs) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
        }

        test_i16x8_neg => {
            (wrapping_neg | i16x8_neg) [1i16, 1, 1, 1, 1, 1, 1, 1],
            (wrapping_neg | i16x8_neg) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
        }

        test_i32x4_abs => {
            (wrapping_abs | i32x4_abs) [1i32, 2, 3, 4],
            (wrapping_abs | i32x4_abs) [i32::MIN, i32::MAX, 0, 4],
        }

        test_i32x4_neg => {
            (wrapping_neg | i32x4_neg) [1i32, 2, 3, 4],
            (wrapping_neg | i32x4_neg) [i32::MIN, i32::MAX, 0, 4],
        }

        test_i64x2_abs => {
            (wrapping_abs | i64x2_abs) [1i64, 2],
            (wrapping_abs | i64x2_abs) [i64::MIN, i64::MAX],
        }

        test_i64x2_neg => {
            (wrapping_neg | i64x2_neg) [1i64, 2],
            (wrapping_neg | i64x2_neg) [i64::MIN, i64::MAX],
        }

        test_f32x4_ceil => {
            (ceil | f32x4_ceil) [1.0f32, 2., 2.5, 3.3],
            (ceil | f32x4_ceil) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_floor => {
            (floor | f32x4_floor) [1.0f32, 2., 2.5, 3.3],
            (floor | f32x4_floor) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_trunc => {
            (trunc | f32x4_trunc) [1.0f32, 2., 2.5, 3.3],
            (trunc | f32x4_trunc) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_nearest => {
            (round | f32x4_nearest) [1.0f32, 2., 2.6, 3.3],
            (round | f32x4_nearest) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_abs => {
            (abs | f32x4_abs) [1.0f32, 2., 2.6, 3.3],
            (abs | f32x4_abs) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_neg => {
            (neg | f32x4_neg) [1.0f32, 2., 2.6, 3.3],
            (neg | f32x4_neg) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_sqrt => {
            (sqrt | f32x4_sqrt) [1.0f32, 2., 2.6, 3.3],
            (sqrt | f32x4_sqrt) [0.0, 0.3, f32::INFINITY, 0.1],
        }

        test_f64x2_ceil => {
            (ceil | f64x2_ceil) [1.0f64, 2.3],
            (ceil | f64x2_ceil) [f64::INFINITY, -0.1],
        }

        test_f64x2_floor => {
            (floor | f64x2_floor) [1.0f64, 2.3],
            (floor | f64x2_floor) [f64::INFINITY, -0.1],
        }

        test_f64x2_trunc => {
            (trunc | f64x2_trunc) [1.0f64, 2.3],
            (trunc | f64x2_trunc) [f64::INFINITY, -0.1],
        }

        test_f64x2_nearest => {
            (round | f64x2_nearest) [1.0f64, 2.3],
            (round | f64x2_nearest) [f64::INFINITY, -0.1],
        }

        test_f64x2_abs => {
            (abs | f64x2_abs) [1.0f64, 2.3],
            (abs | f64x2_abs) [f64::INFINITY, -0.1],
        }

        test_f64x2_neg => {
            (neg | f64x2_neg) [1.0f64, 2.3],
            (neg | f64x2_neg) [f64::INFINITY, -0.1],
        }

        test_f64x2_sqrt => {
            (sqrt | f64x2_sqrt) [1.0f64, 2.3],
            (sqrt | f64x2_sqrt) [f64::INFINITY, 0.1],
        }
    }
5339
    // Compile-time type dispatch used by `test_bop!`: matches the literal
    // identifiers `f32`/`f64` and expands to `true`, falling through to
    // `false` for any other element type. Selects the NaN-aware comparison
    // path for float lanes only.
    macro_rules! floating_point {
        (f32) => {
            true
        };
        (f64) => {
            true
        };
        ($id:ident) => {
            false
        };
    }
5351
5352    trait IsNan: Sized {
5353        fn is_nan(self) -> bool {
5354            false
5355        }
5356    }
5357    impl IsNan for i8 {}
5358    impl IsNan for i16 {}
5359    impl IsNan for i32 {}
5360    impl IsNan for i64 {}
5361
    // Generates a `#[test]` fn applying `$binary_op` to two transmuted input
    // arrays and comparing against `$out`. The first arm (no `=> $oty`)
    // forwards to the second with the output type equal to the element type;
    // comparison intrinsics use the long form (e.g. `f32x4 => i32`) since
    // they yield integer masks. Float outputs are compared lane-by-lane with
    // NaN treated as equal to NaN.
    macro_rules! test_bop {
         ($id:ident[$ety:ident; $ecount:expr] |
          $binary_op:ident [$op_test_id:ident] :
          ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
             test_bop!(
                 $id[$ety; $ecount] => $ety | $binary_op [ $op_test_id ]:
                 ([$($in_a),*], [$($in_b),*]) => [$($out),*]
             );

         };
         ($id:ident[$ety:ident; $ecount:expr] => $oty:ident |
          $binary_op:ident [$op_test_id:ident] :
          ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
             #[test]
             fn $op_test_id() {
                 unsafe {
                     let a_input: [$ety; $ecount] = [$($in_a),*];
                     let b_input: [$ety; $ecount] = [$($in_b),*];
                     let output: [$oty; $ecount] = [$($out),*];

                     let a_vec_in: v128 = transmute(a_input);
                     let b_vec_in: v128 = transmute(b_input);
                     let vec_res: v128 = $binary_op(a_vec_in, b_vec_in);

                     let res: [$oty; $ecount] = transmute(vec_res);

                     // Integer lanes compare exactly; float lanes must match
                     // NaN-ness first, then value for the non-NaN lanes.
                     if !floating_point!($ety) {
                         assert_eq!(res, output);
                     } else {
                         for i in 0..$ecount {
                             let r = res[i];
                             let o = output[i];
                             assert_eq!(r.is_nan(), o.is_nan());
                             if !r.is_nan() {
                                 assert_eq!(r, o);
                             }
                         }
                     }
                 }
             }
         }
     }
5404
    // Like `test_bop!` but the second operand `$in_b` is a plain scalar
    // (used for the shift intrinsics, which take a shift amount rather than
    // a second vector) and is passed through to `$binary_op` unchanged.
    macro_rules! test_bops {
         ($id:ident[$ety:ident; $ecount:expr] |
          $binary_op:ident [$op_test_id:ident]:
          ([$($in_a:expr),*], $in_b:expr) => [$($out:expr),*]) => {
             #[test]
             fn $op_test_id() {
                 unsafe {
                     let a_input: [$ety; $ecount] = [$($in_a),*];
                     let output: [$ety; $ecount] = [$($out),*];

                     let a_vec_in: v128 = transmute(a_input);
                     let vec_res: v128 = $binary_op(a_vec_in, $in_b);

                     let res: [$ety; $ecount] = transmute(vec_res);
                     assert_eq!(res, output);
                 }
             }
         }
     }
5424
    // Unary analogue of `test_bop!`: applies `$unary_op` to one transmuted
    // input array and compares the result array for exact equality (no
    // NaN-aware path; only used with integer-result intrinsics).
    macro_rules! test_uop {
         ($id:ident[$ety:ident; $ecount:expr] |
          $unary_op:ident [$op_test_id:ident]: [$($in_a:expr),*] => [$($out:expr),*]) => {
             #[test]
             fn $op_test_id() {
                 unsafe {
                     let a_input: [$ety; $ecount] = [$($in_a),*];
                     let output: [$ety; $ecount] = [$($out),*];

                     let a_vec_in: v128 = transmute(a_input);
                     let vec_res: v128 = $unary_op(a_vec_in);

                     let res: [$ety; $ecount] = transmute(vec_res);
                     assert_eq!(res, output);
                 }
             }
         }
     }
5443
    // Shift-left by one: each lane doubles, with the top bit discarded
    // (e.g. `i8::MAX << 1` wraps to -2).
    test_bops!(i8x16[i8; 16] | i8x16_shl[i8x16_shl_test]:
               ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
               [0, -2, 4, 6, 8, 10, 12, -2, 2, 2, 2, 2, 2, 2, 2, 2]);
    test_bops!(i16x8[i16; 8] | i16x8_shl[i16x8_shl_test]:
                ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
                [0, -2, 4, 6, 8, 10, 12, -2]);
    test_bops!(i32x4[i32; 4] | i32x4_shl[i32x4_shl_test]:
                ([0, -1, 2, 3], 1) => [0, -2, 4, 6]);
    test_bops!(i64x2[i64; 2] | i64x2_shl[i64x2_shl_test]:
                ([0, -1], 1) => [0, -2]);

    // Arithmetic shift-right by one: sign-extends, so -1 stays -1.
    test_bops!(i8x16[i8; 16] | i8x16_shr[i8x16_shr_s_test]:
               ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
               [0, -1, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
    test_bops!(i16x8[i16; 8] | i16x8_shr[i16x8_shr_s_test]:
               ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
               [0, -1, 1, 1, 2, 2, 3, i16::MAX / 2]);
    test_bops!(i32x4[i32; 4] | i32x4_shr[i32x4_shr_s_test]:
               ([0, -1, 2, 3], 1) => [0, -1, 1, 1]);
    test_bops!(i64x2[i64; 2] | i64x2_shr[i64x2_shr_s_test]:
               ([0, -1], 1) => [0, -1]);

    // Logical shift-right by one: zero-extends, so a -1 bit pattern becomes
    // the type's MAX when reinterpreted as signed.
    test_bops!(i8x16[i8; 16] | u8x16_shr[i8x16_uhr_u_test]:
                ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
                [0, i8::MAX, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
    test_bops!(i16x8[i16; 8] | u16x8_shr[i16x8_uhr_u_test]:
                ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
                [0, i16::MAX, 1, 1, 2, 2, 3, i16::MAX / 2]);
    test_bops!(i32x4[i32; 4] | u32x4_shr[i32x4_uhr_u_test]:
                ([0, -1, 2, 3], 1) => [0, i32::MAX, 1, 1]);
    test_bops!(i64x2[i64; 2] | u64x2_shr[i64x2_uhr_u_test]:
                ([0, -1], 1) => [0, i64::MAX]);
5476
5477    #[test]
5478    fn v128_bitwise_logical_ops() {
5479        unsafe {
5480            let a: [u32; 4] = [u32::MAX, 0, u32::MAX, 0];
5481            let b: [u32; 4] = [u32::MAX; 4];
5482            let c: [u32; 4] = [0; 4];
5483
5484            let vec_a: v128 = transmute(a);
5485            let vec_b: v128 = transmute(b);
5486            let vec_c: v128 = transmute(c);
5487
5488            let r: v128 = v128_and(vec_a, vec_a);
5489            compare_bytes(r, vec_a);
5490            let r: v128 = v128_and(vec_a, vec_b);
5491            compare_bytes(r, vec_a);
5492            let r: v128 = v128_andnot(vec_a, vec_b);
5493            compare_bytes(r, vec_c);
5494            let r: v128 = v128_andnot(vec_a, vec_a);
5495            compare_bytes(r, vec_c);
5496            let r: v128 = v128_andnot(vec_a, vec_c);
5497            compare_bytes(r, vec_a);
5498            let r: v128 = v128_or(vec_a, vec_b);
5499            compare_bytes(r, vec_b);
5500            let r: v128 = v128_not(vec_b);
5501            compare_bytes(r, vec_c);
5502            let r: v128 = v128_xor(vec_a, vec_c);
5503            compare_bytes(r, vec_a);
5504
5505            let r: v128 = v128_bitselect(vec_b, vec_c, vec_b);
5506            compare_bytes(r, vec_b);
5507            let r: v128 = v128_bitselect(vec_b, vec_c, vec_c);
5508            compare_bytes(r, vec_c);
5509            let r: v128 = v128_bitselect(vec_b, vec_c, vec_a);
5510            compare_bytes(r, vec_a);
5511        }
5512    }
5513
5514    macro_rules! test_bool_red {
5515         ([$test_id:ident, $any:ident, $all:ident] | [$($true:expr),*] | [$($false:expr),*] | [$($alt:expr),*]) => {
5516             #[test]
5517             fn $test_id() {
5518                 unsafe {
5519                     let vec_a: v128 = transmute([$($true),*]); let vec_b: v128 = transmute([$($false),*]); let vec_c: v128 = transmute([$($alt),*]); assert_eq!($all(vec_a), true);
5529                     assert_eq!($all(vec_b), false);
5530                     assert_eq!($all(vec_c), false);
5531                 }
5532             }
5533         }
5534     }
5535
    // Boolean reductions for each integer lane width. Inputs per invocation:
    // all-true lanes, all-false lanes, and alternating lanes (which must fail
    // the all-true reduction).
    test_bool_red!(
        [i8x16_boolean_reductions, v128_any_true, i8x16_all_true]
            | [1_i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
            | [0_i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            | [1_i8, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
    );
    test_bool_red!(
        [i16x8_boolean_reductions, v128_any_true, i16x8_all_true]
            | [1_i16, 1, 1, 1, 1, 1, 1, 1]
            | [0_i16, 0, 0, 0, 0, 0, 0, 0]
            | [1_i16, 0, 1, 0, 1, 0, 1, 0]
    );
    test_bool_red!(
        [i32x4_boolean_reductions, v128_any_true, i32x4_all_true]
            | [1_i32, 1, 1, 1]
            | [0_i32, 0, 0, 0]
            | [1_i32, 0, 1, 0]
    );
    test_bool_red!(
        [i64x2_boolean_reductions, v128_any_true, i64x2_all_true]
            | [1_i64, 1]
            | [0_i64, 0]
            | [1_i64, 0]
    );
5560
    // Lane-wise equality for every lane shape. Each expected lane is -1 (all
    // bits set) when the inputs are equal, 0 otherwise; float comparisons
    // produce integer mask lanes (`=> i32` / `=> i64`).
    test_bop!(i8x16[i8; 16] | i8x16_eq[i8x16_eq_test]:
              ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
               [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
              [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i16x8[i16; 8] | i16x8_eq[i16x8_eq_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i32x4[i32; 4] | i32x4_eq[i32x4_eq_test]:
               ([0, 1, 2, 3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
    test_bop!(i64x2[i64; 2] | i64x2_eq[i64x2_eq_test]:
               ([0, 1], [0, 2]) => [-1, 0]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_eq[f32x4_eq_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_eq[f64x2_eq_test]: ([0., 1.], [0., 2.]) => [-1, 0]);
5575
    // Lane-wise inequality: the exact complement of the `eq` masks above
    // (same inputs, -1 where the lanes differ).
    test_bop!(i8x16[i8; 16] | i8x16_ne[i8x16_ne_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i16x8[i16; 8] | i16x8_ne[i16x8_ne_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i32x4[i32; 4] | i32x4_ne[i32x4_ne_test]:
               ([0, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_ne[i64x2_ne_test]:
               ([0, 1], [0, 2]) => [0, -1]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_ne[f32x4_ne_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_ne[f64x2_ne_test]: ([0., 1.], [0., 2.]) => [0, -1]);
5590
    // Lane-wise less-than, signed (`_s`) and unsigned (`_u`) variants. The
    // negative lanes (e.g. -12, -7, -1) flip between the two: negative < x
    // signed, but the same bit pattern is huge unsigned.
    test_bop!(i8x16[i8; 16] | i8x16_lt[i8x16_lt_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -12, 13, 14, 15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1, -1, -1, 0, 0]);
    test_bop!(i8x16[i8; 16] | u8x16_lt[i8x16_lt_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -12, 13, 14, 15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i16x8[i16; 8] | i16x8_lt[i16x8_lt_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, -1]);
    test_bop!(i16x8[i16; 8] | u16x8_lt[i16x8_lt_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i32x4[i32; 4] | i32x4_lt[i32x4_lt_s_test]:
               ([-1, 1, 2, 3], [0, 2, 2, 4]) => [-1, -1, 0, -1]);
    test_bop!(i32x4[i32; 4] | u32x4_lt[i32x4_lt_u_test]:
               ([-1, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_lt[i64x2_lt_s_test]:
               ([-1, 3], [0, 2]) => [-1, 0]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_lt[f32x4_lt_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_lt[f64x2_lt_test]: ([0., 1.], [0., 2.]) => [0, -1]);
5614
    // Lane-wise greater-than, signed and unsigned. Mirror image of the `lt`
    // cases: operands are swapped and the negative lane again distinguishes
    // the `_s` and `_u` expected masks.
    test_bop!(i8x16[i8; 16] | i8x16_gt[i8x16_gt_s_test]:
           ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i8x16[i8; 16] | u8x16_gt[i8x16_gt_u_test]:
           ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, -1]);
    test_bop!(i16x8[i16; 8] | i16x8_gt[i16x8_gt_s_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i16x8[i16; 8] | u16x8_gt[i16x8_gt_u_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, -1]);
    test_bop!(i32x4[i32; 4] | i32x4_gt[i32x4_gt_s_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [0, -1, 0, 0]);
    test_bop!(i32x4[i32; 4] | u32x4_gt[i32x4_gt_u_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [0, -1, 0, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_gt[i64x2_gt_s_test]:
               ([-1, 2], [0, 1]) => [0, -1]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_gt[f32x4_gt_test]:
               ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [0, -1, 0, -1]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_gt[f64x2_gt_test]: ([0., 2.], [0., 1.]) => [0, -1]);
5638
    // Lane-wise greater-or-equal, signed and unsigned. Equal lanes produce -1
    // in both variants; the trailing negative lane separates `_s` from `_u`.
    test_bop!(i8x16[i8; 16] | i8x16_ge[i8x16_ge_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i8x16[i8; 16] | u8x16_ge[i8x16_ge_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i16x8[i16; 8] | i16x8_ge[i16x8_ge_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i16x8[i16; 8] | u16x8_ge[i16x8_ge_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i32x4[i32; 4] | i32x4_ge[i32x4_ge_s_test]:
               ([0, 1, 2, -3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
    test_bop!(i32x4[i32; 4] | u32x4_ge[i32x4_ge_u_test]:
               ([0, 1, 2, -3], [0, 2, 2, 4]) => [-1, 0, -1, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_ge[i64x2_ge_s_test]:
               ([0, 1], [-1, 2]) => [-1, 0]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_ge[f32x4_ge_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_ge[f64x2_ge_test]: ([0., 1.], [0., 2.]) => [-1, 0]);
5662
    // Lane-wise less-or-equal, signed and unsigned, with swapped operands
    // relative to the `ge` cases above.
    test_bop!(i8x16[i8; 16] | i8x16_le[i8x16_le_s_test]:
               ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
               ) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i8x16[i8; 16] | u8x16_le[i8x16_le_u_test]:
               ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
               ) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i16x8[i16; 8] | i16x8_le[i16x8_le_s_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i16x8[i16; 8] | u16x8_le[i16x8_le_u_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i32x4[i32; 4] | i32x4_le[i32x4_le_s_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [-1, 0, -1, -1]);
    test_bop!(i32x4[i32; 4] | u32x4_le[i32x4_le_u_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [-1, 0, -1, 0]);
    test_bop!(i64x2[i64; 2] | i64x2_le[i64x2_le_s_test]:
               ([0, 2], [0, 1]) => [-1, 0]);
    // NOTE(review): the `-0` in the next expected mask is the integer literal
    // zero (lanes here are i32 masks), identical to `0` — harmless but odd.
    test_bop!(f32x4[f32; 4] => i32 | f32x4_le[f32x4_le_test]:
               ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [-1, 0, -1, -0]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_le[f64x2_le_test]: ([0., 2.], [0., 1.]) => [-1, 0]);
5688
    // f32x4 lane-wise float ops: negation, absolute value, min/max (with
    // dedicated NaN-operand cases whose expected lane is NaN), and the four
    // arithmetic operators, all with hand-computed expected lanes.
    test_uop!(f32x4[f32; 4] | f32x4_neg[f32x4_neg_test]: [0., 1., 2., 3.] => [ 0., -1., -2., -3.]);
    test_uop!(f32x4[f32; 4] | f32x4_abs[f32x4_abs_test]: [0., -1., 2., -3.] => [ 0., 1., 2., 3.]);
    test_bop!(f32x4[f32; 4] | f32x4_min[f32x4_min_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., -3., -4., 8.]);
    test_bop!(f32x4[f32; 4] | f32x4_min[f32x4_min_test_nan]:
              ([0., -1., 7., 8.], [1., -3., -4., f32::NAN])
              => [0., -3., -4., f32::NAN]);
    test_bop!(f32x4[f32; 4] | f32x4_max[f32x4_max_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -1., 7., 10.]);
    test_bop!(f32x4[f32; 4] | f32x4_max[f32x4_max_test_nan]:
              ([0., -1., 7., 8.], [1., -3., -4., f32::NAN])
              => [1., -1., 7., f32::NAN]);
    test_bop!(f32x4[f32; 4] | f32x4_add[f32x4_add_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -4., 3., 18.]);
    test_bop!(f32x4[f32; 4] | f32x4_sub[f32x4_sub_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [-1., 2., 11., -2.]);
    test_bop!(f32x4[f32; 4] | f32x4_mul[f32x4_mul_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., 3., -28., 80.]);
    test_bop!(f32x4[f32; 4] | f32x4_div[f32x4_div_test]:
              ([0., -8., 70., 8.], [1., 4., 10., 2.]) => [0., -2., 7., 4.]);
5709
    // f64x2 lane-wise float ops: the same operation set as the f32x4 group
    // above, at two-lane width, including NaN-operand min/max cases.
    test_uop!(f64x2[f64; 2] | f64x2_neg[f64x2_neg_test]: [0., 1.] => [ 0., -1.]);
    test_uop!(f64x2[f64; 2] | f64x2_abs[f64x2_abs_test]: [0., -1.] => [ 0., 1.]);
    test_bop!(f64x2[f64; 2] | f64x2_min[f64x2_min_test]:
               ([0., -1.], [1., -3.]) => [0., -3.]);
    test_bop!(f64x2[f64; 2] | f64x2_min[f64x2_min_test_nan]:
               ([7., 8.], [-4., f64::NAN])
               => [ -4., f64::NAN]);
    test_bop!(f64x2[f64; 2] | f64x2_max[f64x2_max_test]:
               ([0., -1.], [1., -3.]) => [1., -1.]);
    test_bop!(f64x2[f64; 2] | f64x2_max[f64x2_max_test_nan]:
               ([7., 8.], [ -4., f64::NAN])
               => [7., f64::NAN]);
    test_bop!(f64x2[f64; 2] | f64x2_add[f64x2_add_test]:
               ([0., -1.], [1., -3.]) => [1., -4.]);
    test_bop!(f64x2[f64; 2] | f64x2_sub[f64x2_sub_test]:
               ([0., -1.], [1., -3.]) => [-1., 2.]);
    test_bop!(f64x2[f64; 2] | f64x2_mul[f64x2_mul_test]:
               ([0., -1.], [1., -3.]) => [0., 3.]);
    test_bop!(f64x2[f64; 2] | f64x2_div[f64x2_div_test]:
               ([0., -8.], [1., 4.]) => [0., -2.]);
5730
5731    macro_rules! test_conv {
5732        ($test_id:ident | $conv_id:ident | $to_ty:ident | $from:expr,  $to:expr) => {
5733            #[test]
5734            fn $test_id() {
5735                unsafe {
5736                    let from: v128 = transmute($from);
5737                    let to: v128 = transmute($to);
5738
5739                    let r: v128 = $conv_id(from);
5740
5741                    compare_bytes(r, to);
5742                }
5743            }
5744        };
5745    }
5746
    // Lane-wise integer -> f32 conversions, signed and unsigned. Expected
    // lanes are computed with the matching scalar `as f32` cast (u32::MAX is
    // not exactly representable in f32, so both sides round the same way).
    test_conv!(
        f32x4_convert_s_i32x4 | f32x4_convert_i32x4 | f32x4 | [1_i32, 2, 3, 4],
        [1_f32, 2., 3., 4.]
    );
    test_conv!(
        f32x4_convert_u_i32x4 | f32x4_convert_u32x4 | f32x4 | [u32::MAX, 2, 3, 4],
        [u32::MAX as f32, 2., 3., 4.]
    );
5755
5756    #[test]
5757    fn test_conversions() {
5758        compare_bytes(
5759            i32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)),
5760            i32x4(1, i32::MIN, i32::MAX, 0),
5761        );
5762        compare_bytes(
5763            u32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)),
5764            u32x4(1, 0, u32::MAX, 0),
5765        );
5766        compare_bytes(f64x2_convert_low_i32x4(i32x4(1, 2, 3, 4)), f64x2(1., 2.));
5767        compare_bytes(
5768            f64x2_convert_low_i32x4(i32x4(i32::MIN, i32::MAX, 3, 4)),
5769            f64x2(f64::from(i32::MIN), f64::from(i32::MAX)),
5770        );
5771        compare_bytes(f64x2_convert_low_u32x4(u32x4(1, 2, 3, 4)), f64x2(1., 2.));
5772        compare_bytes(
5773            f64x2_convert_low_u32x4(u32x4(u32::MIN, u32::MAX, 3, 4)),
5774            f64x2(f64::from(u32::MIN), f64::from(u32::MAX)),
5775        );
5776
5777        compare_bytes(
5778            i32x4_trunc_sat_f64x2_zero(f64x2(1., f64::NEG_INFINITY)),
5779            i32x4(1, i32::MIN, 0, 0),
5780        );
5781        compare_bytes(
5782            i32x4_trunc_sat_f64x2_zero(f64x2(f64::NAN, f64::INFINITY)),
5783            i32x4(0, i32::MAX, 0, 0),
5784        );
5785        compare_bytes(
5786            u32x4_trunc_sat_f64x2_zero(f64x2(1., f64::NEG_INFINITY)),
5787            u32x4(1, 0, 0, 0),
5788        );
5789        compare_bytes(
5790            u32x4_trunc_sat_f64x2_zero(f64x2(f64::NAN, f64::INFINITY)),
5791            u32x4(0, u32::MAX, 0, 0),
5792        );
5793    }
5794
5795    #[test]
5796    fn test_popcnt() {
5797        unsafe {
5798            for i in 0..=255 {
5799                compare_bytes(
5800                    i8x16_popcnt(u8x16_splat(i)),
5801                    u8x16_splat(i.count_ones() as u8),
5802                )
5803            }
5804
5805            let vectors = [
5806                [0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
5807                [
5808                    100, 200, 50, 0, 10, 7, 38, 185, 192, 3, 34, 85, 93, 7, 31, 99,
5809                ],
5810            ];
5811
5812            for vector in vectors.iter() {
5813                compare_bytes(
5814                    i8x16_popcnt(transmute(*vector)),
5815                    i8x16(
5816                        vector[0].count_ones() as i8,
5817                        vector[1].count_ones() as i8,
5818                        vector[2].count_ones() as i8,
5819                        vector[3].count_ones() as i8,
5820                        vector[4].count_ones() as i8,
5821                        vector[5].count_ones() as i8,
5822                        vector[6].count_ones() as i8,
5823                        vector[7].count_ones() as i8,
5824                        vector[8].count_ones() as i8,
5825                        vector[9].count_ones() as i8,
5826                        vector[10].count_ones() as i8,
5827                        vector[11].count_ones() as i8,
5828                        vector[12].count_ones() as i8,
5829                        vector[13].count_ones() as i8,
5830                        vector[14].count_ones() as i8,
5831                        vector[15].count_ones() as i8,
5832                    ),
5833                )
5834            }
5835        }
5836    }
5837
5838    #[test]
5839    fn test_promote_demote() {
5840        let tests = [
5841            [1., 2.],
5842            [f64::NAN, f64::INFINITY],
5843            [100., 201.],
5844            [0., -0.],
5845            [f64::NEG_INFINITY, 0.],
5846        ];
5847
5848        for [a, b] in tests {
5849            compare_bytes(
5850                f32x4_demote_f64x2_zero(f64x2(a, b)),
5851                f32x4(a as f32, b as f32, 0., 0.),
5852            );
5853            compare_bytes(
5854                f64x2_promote_low_f32x4(f32x4(a as f32, b as f32, 0., 0.)),
5855                f64x2(a, b),
5856            );
5857        }
5858    }
5859
    /// Checks the widening "extended multiply" intrinsics: each `low`/`high`
    /// pair multiplies the low or high half of the input lanes after
    /// extending them to the doubled lane width `to`, verified against a
    /// scalar per-lane model.
    #[test]
    fn test_extmul() {
        // For each `$ctor { ... } => { ([a lanes] * [b lanes])* }` group,
        // expands one `unsafe` check per input pair.
        macro_rules! test {
            ($(
                $ctor:ident {
                    from: $from:ident,
                    to: $to:ident,
                    low: $low:ident,
                    high: $high:ident,
                } => {
                    $(([$($a:tt)*] * [$($b:tt)*]))*
                }
            )*) => ($(
                $(unsafe {
                    let a: [$from; 16 / mem::size_of::<$from>()] = [$($a)*];
                    let b: [$from; 16 / mem::size_of::<$from>()] = [$($b)*];
                    let low = mem::transmute::<_, [$to; 16 / mem::size_of::<$to>()]>($low($ctor($($a)*), $ctor($($b)*)));
                    let high = mem::transmute::<_, [$to; 16 / mem::size_of::<$to>()]>($high($ctor($($a)*), $ctor($($b)*)));

                    // Scalar model: widen each lane with `as $to`, then
                    // multiply. `wrapping_mul` keeps the check well-defined,
                    // though a widened product of two `$from` lanes fits $to.
                    let half = a.len() / 2;
                    for i in 0..half {
                        assert_eq!(
                            (a[i] as $to).wrapping_mul((b[i] as $to)),
                            low[i],
                            "expected {} * {}", a[i] as $to, b[i] as $to,
                        );
                        assert_eq!(
                            (a[half + i] as $to).wrapping_mul((b[half + i] as $to)),
                            high[i],
                            "expected {} * {}", a[half + i] as $to, b[half + i] as $to,
                        );
                    }
                })*
            )*)
        }
        // Each group covers the all-zeros case plus mixed values including
        // type extremes (MIN/MAX) to exercise the sign/zero extension.
        test! {
            i8x16 {
                from: i8,
                to: i16,
                low: i16x8_extmul_low_i8x16,
                high: i16x8_extmul_high_i8x16,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [-1, -2, 3, 100, 124, -38, 33, 87, 92, 108, 22, 8, -43, -128, 22, 0]
                        *
                    [-5, -2, 6, 10, 45, -4, 4, -2, 0, 88, 92, -102, -98, 83, 73, 54]
                )
            }
            u8x16 {
                from: u8,
                to: u16,
                low: u16x8_extmul_low_u8x16,
                high: u16x8_extmul_high_u8x16,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [1, 2, 3, 100, 124, 38, 33, 87, 92, 198, 22, 8, 43, 128, 22, 0]
                        *
                    [5, 200, 6, 10, 45, 248, 4, 2, 0, 2, 92, 102, 234, 83, 73, 54]
                )
            }
            i16x8 {
                from: i16,
                to: i32,
                low: i32x4_extmul_low_i16x8,
                high: i32x4_extmul_high_i16x8,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [-1, 0, i16::MAX, 19931, -2259, 64, 200, 87]
                        *
                    [1, 1, i16::MIN, 29391, 105, 2, 100, -2]
                )
            }
            u16x8 {
                from: u16,
                to: u32,
                low: u32x4_extmul_low_u16x8,
                high: u32x4_extmul_high_u16x8,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [1, 0, u16::MAX, 19931, 2259, 64, 200, 87]
                        *
                    [1, 1, 3, 29391, 105, 2, 100, 2]
                )
            }
            i32x4 {
                from: i32,
                to: i64,
                low: i64x2_extmul_low_i32x4,
                high: i64x2_extmul_high_i32x4,
            } => {
                (
                    [0, 0, 0, 0]
                        *
                    [0, 0, 0, 0]
                )
                (
                    [-1, 0, i32::MAX, 19931]
                        *
                    [1, 1, i32::MIN, 29391]
                )
                (
                    [i32::MAX, 3003183, 3 << 20, 0xffffff]
                        *
                    [i32::MAX, i32::MIN, -40042, 300]
                )
            }
            u32x4 {
                from: u32,
                to: u64,
                low: u64x2_extmul_low_u32x4,
                high: u64x2_extmul_high_u32x4,
            } => {
                (
                    [0, 0, 0, 0]
                        *
                    [0, 0, 0, 0]
                )
                (
                    [1, 0, u32::MAX, 19931]
                        *
                    [1, 1, 3, 29391]
                )
                (
                    [u32::MAX, 3003183, 3 << 20, 0xffffff]
                        *
                    [u32::MAX, 3000, 40042, 300]
                )
            }
        }
    }
6010
6011    #[test]
6012    fn test_q15mulr_sat_s() {
6013        fn test(a: [i16; 8], b: [i16; 8]) {
6014            let a_v = i16x8(a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7]);
6015            let b_v = i16x8(b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
6016            let result = i16x8_q15mulr_sat(a_v, b_v);
6017            let result = unsafe { mem::transmute::<v128, [i16; 8]>(result) };
6018
6019            for (i, (a, b)) in a.iter().zip(&b).enumerate() {
6020                assert_eq!(
6021                    result[i],
6022                    (((*a as i32) * (*b as i32) + 0x4000) >> 15) as i16
6023                );
6024            }
6025        }
6026
6027        test([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]);
6028        test([1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]);
6029        test(
6030            [-1, 100, 2003, -29494, 12, 128, 994, 1],
6031            [-4049, 8494, -10483, 0, 5, 2222, 883, -9],
6032        );
6033    }
6034
    /// Checks the pairwise widening-add intrinsics: each adjacent pair of
    /// input lanes is extended to the doubled lane width `to` and summed,
    /// halving the lane count, verified against a scalar per-pair model.
    #[test]
    fn test_extadd() {
        // For each `$func { from, to } => { [lanes]* }` group, expands one
        // `unsafe` check per input vector.
        macro_rules! test {
            ($(
                $func:ident {
                    from: $from:ident,
                    to: $to:ident,
                } => {
                    $([$($a:tt)*])*
                }
            )*) => ($(
                $(unsafe {
                    let a: [$from; 16 / mem::size_of::<$from>()] = [$($a)*];
                    let a_v = mem::transmute::<_, v128>(a);
                    let r = mem::transmute::<v128, [$to; 16 / mem::size_of::<$to>()]>($func(a_v));

                    // Result lane i is the widened sum of input lanes 2i and
                    // 2i+1 (a pair of $from lanes always fits in $to).
                    let half = a.len() / 2;
                    for i in 0..half {
                        assert_eq!(
                            (a[2 * i] as $to).wrapping_add((a[2 * i + 1] as $to)),
                            r[i],
                            "failed {} + {} != {}",
                            a[2 * i] as $to,
                            a[2 * i + 1] as $to,
                            r[i],
                        );
                    }
                })*
            )*)
        }
        // Inputs cover the all-zeros case plus mixed values including type
        // extremes to exercise the sign/zero extension.
        test! {
            i16x8_extadd_pairwise_i8x16 {
                from: i8,
                to: i16,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                [-1, -2, 3, 100, 124, -38, 33, 87, 92, 108, 22, 8, -43, -128, 22, 0]
                [-5, -2, 6, 10, 45, -4, 4, -2, 0, 88, 92, -102, -98, 83, 73, 54]
            }
            i16x8_extadd_pairwise_u8x16 {
                from: u8,
                to: i16,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                [1, 2, 3, 100, 124, 38, 33, 87, 92, 198, 22, 8, 43, 128, 22, 0]
                [5, 200, 6, 10, 45, 248, 4, 2, 0, 2, 92, 102, 234, 83, 73, 54]
            }
            i32x4_extadd_pairwise_i16x8 {
                from: i16,
                to: i32,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0]
                [-1, 0, i16::MAX, 19931, -2259, 64, 200, 87]
                [1, 1, i16::MIN, 29391, 105, 2, 100, -2]
            }
            i32x4_extadd_pairwise_u16x8 {
                from: u16,
                to: i32,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0]
                [1, 0, u16::MAX, 19931, 2259, 64, 200, 87]
                [1, 1, 3, 29391, 105, 2, 100, 2]
            }
        }
    }
6100}