xref: /linux/lib/crc/arm64/crc64-neon-inner.c (revision d142ab35ee0b7f9e84115fe3e4c3de4a9ac35f5e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Accelerated CRC64 (NVMe) using ARM NEON C intrinsics
 */

#include <linux/types.h>
#include <asm/neon-intrinsics.h>

u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len);
/*
 * Precomputed constants for the CRC-64 generator polynomial G (per the
 * function name, the NVMe variant — see the arch glue for the exact
 * polynomial).
 *
 * fold_consts_val: x^191 mod G, x^127 mod G — used to fold the 128-bit
 * accumulator over each 16-byte message block in the main loop.
 */
static const u64 fold_consts_val[2] = { 0xeadc41fd2ba3d420ULL,
					0x21e9761e252621acULL };
/*
 * bconsts_val: floor(x^127 / G), (G - x^64) / x — the Barrett reduction
 * constants used to reduce the final 128-bit value to a 64-bit CRC.
 */
static const u64 bconsts_val[2] = { 0x27ecfa329aef9f77ULL,
				    0x34d926535897936aULL };
17 
/*
 * Carryless (polynomial) multiply of the low 64-bit lanes of @a and @b,
 * producing the full 128-bit product.
 */
static inline uint64x2_t pmull64(uint64x2_t a, uint64x2_t b)
{
	poly64_t lo_a = (poly64_t)vgetq_lane_u64(a, 0);
	poly64_t lo_b = (poly64_t)vgetq_lane_u64(b, 0);

	return vreinterpretq_u64_p128(vmull_p64(lo_a, lo_b));
}
23 
/*
 * Carryless (polynomial) multiply of the high 64-bit lanes of @a and @b,
 * producing the full 128-bit product.
 */
static inline uint64x2_t pmull64_high(uint64x2_t a, uint64x2_t b)
{
	return vreinterpretq_u64_p128(
		vmull_high_p64(vreinterpretq_p64_u64(a),
			       vreinterpretq_p64_u64(b)));
}
31 
/*
 * Carryless (polynomial) multiply of the high lane of @a by the low lane
 * of @b, producing the full 128-bit product.
 */
static inline uint64x2_t pmull64_hi_lo(uint64x2_t a, uint64x2_t b)
{
	poly64_t hi_a = (poly64_t)vgetq_lane_u64(a, 1);
	poly64_t lo_b = (poly64_t)vgetq_lane_u64(b, 0);

	return vreinterpretq_u64_p128(vmull_p64(hi_a, lo_b));
}
37 
/*
 * Compute the CRC64 (NVMe) of @len bytes at @p, seeded with @crc, using
 * 128-bit carryless-multiply folding followed by a Barrett reduction.
 *
 * NOTE(review): the loop structure only terminates correctly when @len is
 * at least 16 and a multiple of 16 — presumably guaranteed by the arch
 * glue that calls this; confirm against the caller.
 */
u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len)
{
	uint64x2_t fold_consts = vld1q_u64(fold_consts_val);
	uint64x2_t zero = { };
	uint64x2_t acc = { crc, 0 };

	/* Absorb the first 16-byte block into the accumulator. */
	acc = veorq_u64(acc, vreinterpretq_u64_u8(vld1q_u8(p)));
	p += 16;
	len -= 16;

	/*
	 * For each remaining block: fold the 128-bit accumulator forward by
	 * 128 bits (low lane * x^191 mod G, high lane * x^127 mod G), then
	 * XOR in the next 16 bytes of message.
	 */
	while (len >= 16) {
		acc = veorq_u64(pmull64(fold_consts, acc),
				pmull64_high(fold_consts, acc));
		acc = veorq_u64(acc, vreinterpretq_u64_u8(vld1q_u8(p)));
		p += 16;
		len -= 16;
	}

	/* Multiply the 128-bit value by x^64 and reduce it back to 128 bits. */
	acc = veorq_u64(vextq_u64(acc, zero, 1),
			pmull64_hi_lo(fold_consts, acc));

	/* Final Barrett reduction down to a 64-bit CRC. */
	uint64x2_t bconsts = vld1q_u64(bconsts_val);
	uint64x2_t quot = pmull64(bconsts, acc);

	acc = veorq_u64(acc, veorq_u64(vextq_u64(zero, quot, 1),
				       pmull64_hi_lo(bconsts, quot)));

	return vgetq_lane_u64(acc, 1);
}
66