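//! AArch64 (ARMv8) data-processing intrinsics: byte reverse, count leading
//! zeros, bit reverse, and count leading sign bits.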
#[cfg(test)]
use stdsimd_test::assert_instr;
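/// Reverses the byte order of a 64-bit value (maps to the AArch64 `REV`
/// instruction).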
#[inline]
#[cfg_attr(test, assert_instr(rev))]
pub unsafe fn _rev_u64(x: u64) -> u64 {
    x.swap_bytes()
}
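/// Counts the leading zero bits of a 64-bit value (maps to the AArch64 `CLZ`
/// instruction).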
#[inline]
#[cfg_attr(test, assert_instr(clz))]
pub unsafe fn _clz_u64(x: u64) -> u64 {
    x.leading_zeros() as u64
}
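/// Reverses the bit order of a 64-bit value (maps to the AArch64 `RBIT`
/// instruction).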
#[inline]
#[cfg_attr(test, assert_instr(rbit))]
pub unsafe fn _rbit_u64(x: u64) -> u64 {
    use intrinsics::bitreverse;
    bitreverse(x)
}
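/// Counts the leading sign bits of a 32-bit value (maps to the AArch64 `CLS`
/// instruction): the number of consecutive bits following the most
/// significant bit that are identical to it.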
#[inline]
#[cfg_attr(test, assert_instr(cls))]
pub unsafe fn _cls_u32(x: u32) -> u32 {
    // XOR-ing with a mask of the sign bit turns the leading copies of the
    // sign bit into zeros; shifting left by one keeps the sign bit itself out
    // of the count, and OR-ing in 1 makes the argument non-zero so the count
    // is well-defined even for 0 and !0.
    u32::leading_zeros((((((x as i32) >> 31) as u32) ^ x) << 1) | 1)
}
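/// Counts the leading sign bits of a 64-bit value (maps to the AArch64 `CLS`
/// instruction).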
#[inline]
#[cfg_attr(test, assert_instr(cls))]
pub unsafe fn _cls_u64(x: u64) -> u64 {
    // Same sign-mask trick as `_cls_u32`, widened to 64 bits.
    u64::leading_zeros((((((x as i64) >> 63) as u64) ^ x) << 1) | 1) as u64
}
#[cfg(test)]
mod tests {
    use coresimd::aarch64::v8;

    #[test]
    fn _rev_u64() {
        unsafe {
            assert_eq!(
                v8::_rev_u64(0b0000_0000_1111_1111_0000_0000_1111_1111_u64),
                0b1111_1111_0000_0000_1111_1111_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_u64
            );
        }
    }

    #[test]
    fn _clz_u64() {
        unsafe {
            assert_eq!(v8::_clz_u64(0b0000_1010u64), 60u64);
        }
    }

    #[test]
    fn _rbit_u64() {
        unsafe {
            assert_eq!(
                v8::_rbit_u64(0b0000_0000_1111_1101_0000_0000_1111_1111_u64),
                0b1111_1111_0000_0000_1011_1111_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_u64
            );
        }
    }

    #[test]
    fn _cls_u32() {
        unsafe {
            assert_eq!(
                v8::_cls_u32(0b1111_1111_1111_1111_0000_0000_1111_1111_u32),
                15_u32
            );
        }
    }

    #[test]
    fn _cls_u64() {
        unsafe {
            assert_eq!(
                v8::_cls_u64(
                    0b1111_1111_1111_1111_0000_0000_1111_1111_0000_0000_0000_0000_0000_0000_0000_0000_u64
                ),
                15_u64
            );
        }
    }
}