// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 *
 * This is based in part on Andrew Moon's poly1305-donna, which is in the
 * public domain.
 */
9 #include <linux/kernel.h>
10 #include <asm/unaligned.h>
11 #include <crypto/internal/poly1305.h>
/* The 64-bit limb code below relies on native 64x64->128-bit multiplies. */
typedef __uint128_t u128;
15 void poly1305_core_setkey(struct poly1305_core_key *key,
16 const u8 raw_key[POLY1305_BLOCK_SIZE])
20 /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
21 t0 = get_unaligned_le64(&raw_key[0]);
22 t1 = get_unaligned_le64(&raw_key[8]);
24 key->key.r64[0] = t0 & 0xffc0fffffffULL;
25 key->key.r64[1] = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffffULL;
26 key->key.r64[2] = ((t1 >> 24)) & 0x00ffffffc0fULL;
29 key->precomputed_s.r64[0] = key->key.r64[1] * 20;
30 key->precomputed_s.r64[1] = key->key.r64[2] * 20;
32 EXPORT_SYMBOL(poly1305_core_setkey);
34 void poly1305_core_blocks(struct poly1305_state *state,
35 const struct poly1305_core_key *key, const void *src,
36 unsigned int nblocks, u32 hibit)
38 const u8 *input = src;
49 hibit64 = ((u64)hibit) << 40;
59 s1 = key->precomputed_s.r64[0];
60 s2 = key->precomputed_s.r64[1];
66 t0 = get_unaligned_le64(&input[0]);
67 t1 = get_unaligned_le64(&input[8]);
69 h0 += t0 & 0xfffffffffffULL;
70 h1 += ((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL;
71 h2 += (((t1 >> 24)) & 0x3ffffffffffULL) | hibit64;
90 /* (partial) h %= p */
92 h0 = (u64)d0 & 0xfffffffffffULL;
95 h1 = (u64)d1 & 0xfffffffffffULL;
98 h2 = (u64)d2 & 0x3ffffffffffULL;
101 h0 = h0 & 0xfffffffffffULL;
104 input += POLY1305_BLOCK_SIZE;
111 EXPORT_SYMBOL(poly1305_core_blocks);
113 void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
127 h1 &= 0xfffffffffffULL;
130 h2 &= 0x3ffffffffffULL;
133 h0 &= 0xfffffffffffULL;
136 h1 &= 0xfffffffffffULL;
139 h2 &= 0x3ffffffffffULL;
142 h0 &= 0xfffffffffffULL;
148 g0 &= 0xfffffffffffULL;
151 g1 &= 0xfffffffffffULL;
152 g2 = h2 + c - (1ULL << 42);
154 /* select h if h < p, or h + -p if h >= p */
155 c = (g2 >> ((sizeof(u64) * 8) - 1)) - 1;
165 /* h = (h + nonce) */
166 t0 = ((u64)nonce[1] << 32) | nonce[0];
167 t1 = ((u64)nonce[3] << 32) | nonce[2];
169 h0 += t0 & 0xfffffffffffULL;
171 h0 &= 0xfffffffffffULL;
172 h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL) + c;
174 h1 &= 0xfffffffffffULL;
175 h2 += (((t1 >> 24)) & 0x3ffffffffffULL) + c;
176 h2 &= 0x3ffffffffffULL;
179 /* mac = h % (2^128) */
180 h0 = h0 | (h1 << 44);
181 h1 = (h1 >> 20) | (h2 << 24);
183 put_unaligned_le64(h0, &mac[0]);
184 put_unaligned_le64(h1, &mac[8]);
186 EXPORT_SYMBOL(poly1305_core_emit);