// ppv_lite86/soft.rs

1//! Implement 256- and 512- bit in terms of 128-bit, for machines without native wide SIMD.
2
3use crate::types::*;
4use crate::{vec128_storage, vec256_storage, vec512_storage};
5use core::marker::PhantomData;
6use core::ops::*;
7
/// A soft 256-bit vector: two 128-bit lanes of `W`, tagged with a
/// zero-sized generics marker `G` (stored only as `PhantomData`).
#[derive(Copy, Clone, Default)]
#[allow(non_camel_case_types)]
pub struct x2<W, G>(pub [W; 2], PhantomData<G>);
impl<W, G> x2<W, G> {
    /// Wrap two 128-bit lanes into a soft 256-bit vector.
    #[inline(always)]
    pub fn new(xs: [W; 2]) -> Self {
        Self(xs, PhantomData)
    }
}
/// Implement a by-value binary operator trait (`$tr`, method `$meth`)
/// for `x2<W, G>` by applying the operation to each lane independently.
macro_rules! fwd_binop_x2 {
    ($tr:ident, $meth:ident) => {
        impl<W: $tr + Copy, G> $tr for x2<W, G> {
            type Output = x2<W::Output, G>;
            #[inline(always)]
            fn $meth(self, rhs: Self) -> Self::Output {
                x2::new([self.0[0].$meth(rhs.0[0]), self.0[1].$meth(rhs.0[1])])
            }
        }
    };
}
/// Implement an in-place compound-assignment trait (`$tr`, method `$meth`)
/// for `x2<W, G>`, forwarding the assignment to each lane in order.
macro_rules! fwd_binop_assign_x2 {
    ($tr:ident, $meth:ident) => {
        impl<W: $tr + Copy, G> $tr for x2<W, G> {
            #[inline(always)]
            fn $meth(&mut self, rhs: Self) {
                self.0[0].$meth(rhs.0[0]);
                self.0[1].$meth(rhs.0[1]);
            }
        }
    };
}
/// Emit a unary method `$meth(self) -> Self` that applies the same-named
/// lane method to both halves. Used inside trait impls for `x2`.
macro_rules! fwd_unop_x2 {
    ($meth:ident) => {
        #[inline(always)]
        fn $meth(self) -> Self {
            let [lo, hi] = self.0;
            x2::new([lo.$meth(), hi.$meth()])
        }
    };
}
// 32-bit word rotations, forwarded to both 128-bit halves.
impl<W, G> RotateEachWord32 for x2<W, G>
where
    W: Copy + RotateEachWord32,
{
    fwd_unop_x2!(rotate_each_word_right7);
    fwd_unop_x2!(rotate_each_word_right8);
    fwd_unop_x2!(rotate_each_word_right11);
    fwd_unop_x2!(rotate_each_word_right12);
    fwd_unop_x2!(rotate_each_word_right16);
    fwd_unop_x2!(rotate_each_word_right20);
    fwd_unop_x2!(rotate_each_word_right24);
    fwd_unop_x2!(rotate_each_word_right25);
}
// 64-bit word rotation, forwarded to both halves.
impl<W, G> RotateEachWord64 for x2<W, G>
where
    W: Copy + RotateEachWord64,
{
    fwd_unop_x2!(rotate_each_word_right32);
}
// Empty impl: RotateEachWord128 evidently requires no methods here
// (declared in crate::types; any methods must be defaulted).
impl<W, G> RotateEachWord128 for x2<W, G> where W: RotateEachWord128 {}
// Marker impls: the BitOps* hierarchy (from crate::types) is satisfied by
// the operator impls generated by the macro invocations below.
impl<W, G> BitOps0 for x2<W, G>
where
    W: BitOps0,
    G: Copy,
{
}
impl<W, G> BitOps32 for x2<W, G>
where
    W: BitOps32 + BitOps0,
    G: Copy,
{
}
impl<W, G> BitOps64 for x2<W, G>
where
    W: BitOps64 + BitOps0,
    G: Copy,
{
}
impl<W, G> BitOps128 for x2<W, G>
where
    W: BitOps128 + BitOps0,
    G: Copy,
{
}
// Lane-wise bitwise operators and their compound-assignment forms.
fwd_binop_x2!(BitAnd, bitand);
fwd_binop_x2!(BitOr, bitor);
fwd_binop_x2!(BitXor, bitxor);
fwd_binop_x2!(AndNot, andnot);
fwd_binop_assign_x2!(BitAndAssign, bitand_assign);
fwd_binop_assign_x2!(BitOrAssign, bitor_assign);
fwd_binop_assign_x2!(BitXorAssign, bitxor_assign);
// Arithmetic: only lane-wise addition is provided (wrap-around semantics
// come from whatever `W::add` does; nothing extra is added here).
impl<W, G> ArithOps for x2<W, G>
where
    W: ArithOps,
    G: Copy,
{
}
fwd_binop_x2!(Add, add);
fwd_binop_assign_x2!(AddAssign, add_assign);
106impl<W: Not + Copy, G> Not for x2<W, G> {
107    type Output = x2<W::Output, G>;
108    #[inline(always)]
109    fn not(self) -> Self::Output {
110        x2::new([self.0[0].not(), self.0[1].not()])
111    }
112}
impl<W, G> UnsafeFrom<[W; 2]> for x2<W, G> {
    // No extra invariant is checked here; the `unsafe` obligation is
    // whatever the UnsafeFrom trait (crate::types) demands of callers.
    #[inline(always)]
    unsafe fn unsafe_from(xs: [W; 2]) -> Self {
        x2::new(xs)
    }
}
impl<W: Copy, G> Vec2<W> for x2<W, G> {
    /// Return lane `i`. Panics if `i >= 2` (array index out of bounds).
    #[inline(always)]
    fn extract(self, i: u32) -> W {
        self.0[i as usize]
    }
    /// Replace lane `i` and return the updated vector. Panics if `i >= 2`.
    #[inline(always)]
    fn insert(mut self, w: W, i: u32) -> Self {
        self.0[i as usize] = w;
        self
    }
}
impl<W: Copy + Store<vec128_storage>, G> Store<vec256_storage> for x2<W, G> {
    #[inline(always)]
    unsafe fn unpack(p: vec256_storage) -> Self {
        // Split the 256-bit storage into its two 128-bit halves and
        // unpack each half into a lane.
        let p = p.split128();
        x2::new([W::unpack(p[0]), W::unpack(p[1])])
    }
}
// Repack the two 128-bit lanes back into a single 256-bit storage.
impl<W, G> From<x2<W, G>> for vec256_storage
where
    W: Copy,
    vec128_storage: From<W>,
{
    #[inline(always)]
    fn from(x: x2<W, G>) -> Self {
        vec256_storage::new128([x.0[0].into(), x.0[1].into()])
    }
}
// Bit/byte/word swizzles (semantics defined by W's Swap64 impl),
// forwarded independently to each 128-bit half.
impl<W, G> Swap64 for x2<W, G>
where
    W: Swap64 + Copy,
{
    fwd_unop_x2!(swap1);
    fwd_unop_x2!(swap2);
    fwd_unop_x2!(swap4);
    fwd_unop_x2!(swap8);
    fwd_unop_x2!(swap16);
    fwd_unop_x2!(swap32);
    fwd_unop_x2!(swap64);
}
impl<W: Copy, G> MultiLane<[W; 2]> for x2<W, G> {
    /// Expose the two lanes as a plain array.
    #[inline(always)]
    fn to_lanes(self) -> [W; 2] {
        self.0
    }
    /// Build the vector from a plain lane array.
    #[inline(always)]
    fn from_lanes(lanes: [W; 2]) -> Self {
        x2::new(lanes)
    }
}
169impl<W: BSwap + Copy, G> BSwap for x2<W, G> {
170    #[inline(always)]
171    fn bswap(self) -> Self {
172        x2::new([self.0[0].bswap(), self.0[1].bswap()])
173    }
174}
impl<W: StoreBytes + BSwap + Copy, G> StoreBytes for x2<W, G> {
    #[inline(always)]
    unsafe fn unsafe_read_le(input: &[u8]) -> Self {
        // Fixed split at byte 16 — assumes each lane W occupies exactly
        // 16 bytes (TODO confirm against crate::types). Panics if
        // input.len() < 16.
        let input = input.split_at(16);
        x2::new([W::unsafe_read_le(input.0), W::unsafe_read_le(input.1)])
    }
    #[inline(always)]
    unsafe fn unsafe_read_be(input: &[u8]) -> Self {
        // Big-endian read = little-endian read + lane-wise byte swap.
        x2::unsafe_read_le(input).bswap()
    }
    #[inline(always)]
    fn write_le(self, out: &mut [u8]) {
        // Lane 0 to bytes [0,16), lane 1 to bytes [16, ..).
        let out = out.split_at_mut(16);
        self.0[0].write_le(out.0);
        self.0[1].write_le(out.1);
    }
    #[inline(always)]
    fn write_be(self, out: &mut [u8]) {
        let out = out.split_at_mut(16);
        self.0[0].write_be(out.0);
        self.0[1].write_be(out.1);
    }
}
198
/// A soft 512-bit vector: four 128-bit lanes of `W`.
#[derive(Copy, Clone, Default)]
#[allow(non_camel_case_types)]
pub struct x4<W>(pub [W; 4]);
impl<W> x4<W> {
    /// Wrap four 128-bit lanes into a soft 512-bit vector.
    #[inline(always)]
    pub fn new(xs: [W; 4]) -> Self {
        Self(xs)
    }
}
/// Implement a by-value binary operator trait (`$tr`, method `$meth`)
/// for `x4<W>` by applying the operation to each lane independently.
macro_rules! fwd_binop_x4 {
    ($tr:ident, $meth:ident) => {
        impl<W: $tr + Copy> $tr for x4<W> {
            type Output = x4<W::Output>;
            #[inline(always)]
            fn $meth(self, rhs: Self) -> Self::Output {
                let (a, b) = (self.0, rhs.0);
                x4([
                    a[0].$meth(b[0]),
                    a[1].$meth(b[1]),
                    a[2].$meth(b[2]),
                    a[3].$meth(b[3]),
                ])
            }
        }
    };
}
/// Implement an in-place compound-assignment trait (`$tr`, method `$meth`)
/// for `x4<W>`, forwarding the assignment to each lane in order.
macro_rules! fwd_binop_assign_x4 {
    ($tr:ident, $meth:ident) => {
        impl<W: $tr + Copy> $tr for x4<W> {
            #[inline(always)]
            fn $meth(&mut self, rhs: Self) {
                self.0[0].$meth(rhs.0[0]);
                self.0[1].$meth(rhs.0[1]);
                self.0[2].$meth(rhs.0[2]);
                self.0[3].$meth(rhs.0[3]);
            }
        }
    };
}
/// Emit a unary method `$meth(self) -> Self` that applies the same-named
/// lane method to all four lanes. Used inside trait impls for `x4`.
macro_rules! fwd_unop_x4 {
    ($meth:ident) => {
        #[inline(always)]
        fn $meth(self) -> Self {
            let [a, b, c, d] = self.0;
            x4([a.$meth(), b.$meth(), c.$meth(), d.$meth()])
        }
    };
}
// 32-bit word rotations, forwarded to all four 128-bit lanes.
impl<W> RotateEachWord32 for x4<W>
where
    W: Copy + RotateEachWord32,
{
    fwd_unop_x4!(rotate_each_word_right7);
    fwd_unop_x4!(rotate_each_word_right8);
    fwd_unop_x4!(rotate_each_word_right11);
    fwd_unop_x4!(rotate_each_word_right12);
    fwd_unop_x4!(rotate_each_word_right16);
    fwd_unop_x4!(rotate_each_word_right20);
    fwd_unop_x4!(rotate_each_word_right24);
    fwd_unop_x4!(rotate_each_word_right25);
}
// 64-bit word rotation, forwarded to all four lanes.
impl<W> RotateEachWord64 for x4<W>
where
    W: Copy + RotateEachWord64,
{
    fwd_unop_x4!(rotate_each_word_right32);
}
// Empty/marker impls: RotateEachWord128 and the BitOps* hierarchy are
// satisfied by the operator impls generated by the macro invocations below.
impl<W> RotateEachWord128 for x4<W> where W: RotateEachWord128 {}
impl<W> BitOps0 for x4<W> where W: BitOps0 {}
impl<W> BitOps32 for x4<W> where W: BitOps32 + BitOps0 {}
impl<W> BitOps64 for x4<W> where W: BitOps64 + BitOps0 {}
impl<W> BitOps128 for x4<W> where W: BitOps128 + BitOps0 {}
// Lane-wise bitwise operators, compound assignments, and addition for x4.
fwd_binop_x4!(BitAnd, bitand);
fwd_binop_x4!(BitOr, bitor);
fwd_binop_x4!(BitXor, bitxor);
fwd_binop_x4!(AndNot, andnot);
fwd_binop_assign_x4!(BitAndAssign, bitand_assign);
fwd_binop_assign_x4!(BitOrAssign, bitor_assign);
fwd_binop_assign_x4!(BitXorAssign, bitxor_assign);
impl<W> ArithOps for x4<W> where W: ArithOps {}
fwd_binop_x4!(Add, add);
fwd_binop_assign_x4!(AddAssign, add_assign);
284impl<W: Not + Copy> Not for x4<W> {
285    type Output = x4<W::Output>;
286    #[inline(always)]
287    fn not(self) -> Self::Output {
288        x4([
289            self.0[0].not(),
290            self.0[1].not(),
291            self.0[2].not(),
292            self.0[3].not(),
293        ])
294    }
295}
impl<W> UnsafeFrom<[W; 4]> for x4<W> {
    // No extra invariant is checked here; the `unsafe` obligation is
    // whatever the UnsafeFrom trait (crate::types) demands of callers.
    #[inline(always)]
    unsafe fn unsafe_from(xs: [W; 4]) -> Self {
        x4(xs)
    }
}
impl<W: Copy> Vec4<W> for x4<W> {
    /// Return lane `i`. Panics if `i >= 4` (array index out of bounds).
    #[inline(always)]
    fn extract(self, i: u32) -> W {
        self.0[i as usize]
    }
    /// Replace lane `i` and return the updated vector. Panics if `i >= 4`.
    #[inline(always)]
    fn insert(mut self, w: W, i: u32) -> Self {
        self.0[i as usize] = w;
        self
    }
}
impl<W: Copy + Store<vec128_storage>> Store<vec512_storage> for x4<W> {
    #[inline(always)]
    unsafe fn unpack(p: vec512_storage) -> Self {
        // Split the 512-bit storage into four 128-bit quarters and
        // unpack each quarter into a lane.
        let p = p.split128();
        x4([
            W::unpack(p[0]),
            W::unpack(p[1]),
            W::unpack(p[2]),
            W::unpack(p[3]),
        ])
    }
}
// Repack the four 128-bit lanes back into a single 512-bit storage.
impl<W> From<x4<W>> for vec512_storage
where
    W: Copy,
    vec128_storage: From<W>,
{
    #[inline(always)]
    fn from(x: x4<W>) -> Self {
        vec512_storage::new128([x.0[0].into(), x.0[1].into(), x.0[2].into(), x.0[3].into()])
    }
}
// Bit/byte/word swizzles (semantics defined by W's Swap64 impl),
// forwarded independently to each of the four lanes.
impl<W> Swap64 for x4<W>
where
    W: Swap64 + Copy,
{
    fwd_unop_x4!(swap1);
    fwd_unop_x4!(swap2);
    fwd_unop_x4!(swap4);
    fwd_unop_x4!(swap8);
    fwd_unop_x4!(swap16);
    fwd_unop_x4!(swap32);
    fwd_unop_x4!(swap64);
}
impl<W: Copy> MultiLane<[W; 4]> for x4<W> {
    /// Expose the four lanes as a plain array.
    #[inline(always)]
    fn to_lanes(self) -> [W; 4] {
        self.0
    }
    /// Build the vector from a plain lane array.
    #[inline(always)]
    fn from_lanes(lanes: [W; 4]) -> Self {
        x4(lanes)
    }
}
357impl<W: BSwap + Copy> BSwap for x4<W> {
358    #[inline(always)]
359    fn bswap(self) -> Self {
360        x4([
361            self.0[0].bswap(),
362            self.0[1].bswap(),
363            self.0[2].bswap(),
364            self.0[3].bswap(),
365        ])
366    }
367}
impl<W: StoreBytes + BSwap + Copy> StoreBytes for x4<W> {
    #[inline(always)]
    unsafe fn unsafe_read_le(input: &[u8]) -> Self {
        // Fixed 16-byte stride per lane (64 bytes total) — assumes each
        // lane W occupies exactly 16 bytes (TODO confirm against
        // crate::types). Panics if input.len() < 64.
        x4([
            W::unsafe_read_le(&input[0..16]),
            W::unsafe_read_le(&input[16..32]),
            W::unsafe_read_le(&input[32..48]),
            W::unsafe_read_le(&input[48..64]),
        ])
    }
    #[inline(always)]
    unsafe fn unsafe_read_be(input: &[u8]) -> Self {
        // Big-endian read = little-endian read + lane-wise byte swap.
        x4::unsafe_read_le(input).bswap()
    }
    #[inline(always)]
    fn write_le(self, out: &mut [u8]) {
        // Lane i goes to bytes [16*i, 16*i + 16). Panics if out.len() < 64.
        self.0[0].write_le(&mut out[0..16]);
        self.0[1].write_le(&mut out[16..32]);
        self.0[2].write_le(&mut out[32..48]);
        self.0[3].write_le(&mut out[48..64]);
    }
    #[inline(always)]
    fn write_be(self, out: &mut [u8]) {
        self.0[0].write_be(&mut out[0..16]);
        self.0[1].write_be(&mut out[16..32]);
        self.0[2].write_be(&mut out[32..48]);
        self.0[3].write_be(&mut out[48..64]);
    }
}
// Forward the 32-bit-word shuffles (named by resulting word order) to
// every 128-bit lane independently.
impl<W: Copy + LaneWords4> LaneWords4 for x4<W> {
    #[inline(always)]
    fn shuffle_lane_words2301(self) -> Self {
        x4([
            self.0[0].shuffle_lane_words2301(),
            self.0[1].shuffle_lane_words2301(),
            self.0[2].shuffle_lane_words2301(),
            self.0[3].shuffle_lane_words2301(),
        ])
    }
    #[inline(always)]
    fn shuffle_lane_words1230(self) -> Self {
        x4([
            self.0[0].shuffle_lane_words1230(),
            self.0[1].shuffle_lane_words1230(),
            self.0[2].shuffle_lane_words1230(),
            self.0[3].shuffle_lane_words1230(),
        ])
    }
    #[inline(always)]
    fn shuffle_lane_words3012(self) -> Self {
        x4([
            self.0[0].shuffle_lane_words3012(),
            self.0[1].shuffle_lane_words3012(),
            self.0[2].shuffle_lane_words3012(),
            self.0[3].shuffle_lane_words3012(),
        ])
    }
}