nss/lib/freebl/poly1305/poly1305-donna-x64-sse2-incremental-source.c @ 0:1e5118fa0cb1

This is NSS with a CMake buildsystem. To compile a static NSS library for Windows, we used the Chromium NSS fork and added a CMake buildsystem to build it statically for Windows. See README.chromium for the Chromium changes and README.trustbridge for our modifications.
author Andre Heinecke <andre.heinecke@intevation.de>
date Mon, 28 Jul 2014 10:47:06 +0200
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* This implementation of poly1305 is by Andrew Moon
 * (https://github.com/floodyberry/poly1305-donna) and released as public
 * domain. It implements SIMD vectorization based on the algorithm described in
 * http://cr.yp.to/papers.html#neoncrypto. Unrolled to 2 powers, i.e. 64 byte
 * block size. */
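/* Informal sketch of the underlying math (editor's gloss, not from the
 * original author): serial Poly1305 absorbs one 16-byte block per step,
 *
 *     h = (h + m[i]) * r  mod  2^130 - 5,
 *
 * so four blocks give h = m1*r^4 + m2*r^3 + m3*r^2 + m4*r. The two SIMD
 * lanes below split that sum by parity:
 *
 *     lane_x = m1*r^2 + m3,  lane_y = m2*r^2 + m4,
 *     h = lane_x*r^2 + lane_y*r.
 *
 * This is why the 64-byte loop multiplies H by [r^4,r^4] and the final
 * combine multiplies by [r^2,r] before summing the lanes. */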

#include <emmintrin.h>
#include <stdint.h>

#include "poly1305.h"

#define ALIGN(x) __attribute__((aligned(x)))
#define INLINE inline
#define U8TO64_LE(m) (*(uint64_t*)(m))
#define U8TO32_LE(m) (*(uint32_t*)(m))
#define U64TO8_LE(m,v) (*(uint64_t*)(m)) = v

typedef __m128i xmmi;
typedef unsigned __int128 uint128_t;

static const uint32_t ALIGN(16) poly1305_x64_sse2_message_mask[4] = {(1 << 26) - 1, 0, (1 << 26) - 1, 0};
static const uint32_t ALIGN(16) poly1305_x64_sse2_5[4] = {5, 0, 5, 0};
static const uint32_t ALIGN(16) poly1305_x64_sse2_1shl128[4] = {(1 << 24), 0, (1 << 24), 0};
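/* Informal note: message limbs are kept in radix 2^26, so the mask above
 * keeps the low 26 bits of each 64-bit lane. The 2^128 pad bit of a full
 * 16-byte block lands at bit 128 = 4*26 + 24, i.e. bit 24 of the fifth
 * limb, which is the (1 << 24) in poly1305_x64_sse2_1shl128. */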

static uint128_t INLINE
add128(uint128_t a, uint128_t b) {
    return a + b;
}

static uint128_t INLINE
add128_64(uint128_t a, uint64_t b) {
    return a + b;
}

static uint128_t INLINE
mul64x64_128(uint64_t a, uint64_t b) {
    return (uint128_t)a * b;
}

static uint64_t INLINE
lo128(uint128_t a) {
    return (uint64_t)a;
}

static uint64_t INLINE
shr128(uint128_t v, const int shift) {
    return (uint64_t)(v >> shift);
}

static uint64_t INLINE
shr128_pair(uint64_t hi, uint64_t lo, const int shift) {
    return (uint64_t)((((uint128_t)hi << 64) | lo) >> shift);
}

typedef struct poly1305_power_t {
    union {
        xmmi v;
        uint64_t u[2];
        uint32_t d[4];
    } R20,R21,R22,R23,R24,S21,S22,S23,S24;
} poly1305_power;

typedef struct poly1305_state_internal_t {
    poly1305_power P[2]; /* 288 bytes, top 32 bit halves unused = 144 bytes of free storage */
    union {
        xmmi H[5]; /* 80 bytes */
        uint64_t HH[10];
    };
    /* uint64_t r0,r1,r2;  [24 bytes] */
    /* uint64_t pad0,pad1; [16 bytes] */
    uint64_t started;    /* 8 bytes */
    uint64_t leftover;   /* 8 bytes */
    uint8_t buffer[64];  /* 64 bytes */
} poly1305_state_internal; /* 448 bytes total + 63 bytes for alignment = 511 bytes raw */

static poly1305_state_internal INLINE
*poly1305_aligned_state(poly1305_state *state) {
    return (poly1305_state_internal *)(((uint64_t)state + 63) & ~63);
}
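/* Informal: (addr + 63) & ~63 rounds the caller's buffer up to the next
 * 64-byte boundary, which is why poly1305_state needs the extra 63 raw
 * bytes noted above. */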

/* copy 0-63 bytes */
static void INLINE
poly1305_block_copy(uint8_t *dst, const uint8_t *src, size_t bytes) {
    size_t offset = src - dst;
    if (bytes & 32) {
        _mm_storeu_si128((xmmi *)(dst + 0), _mm_loadu_si128((xmmi *)(dst + offset + 0)));
        _mm_storeu_si128((xmmi *)(dst + 16), _mm_loadu_si128((xmmi *)(dst + offset + 16)));
        dst += 32;
    }
    if (bytes & 16) { _mm_storeu_si128((xmmi *)dst, _mm_loadu_si128((xmmi *)(dst + offset))); dst += 16; }
    if (bytes & 8) { *(uint64_t *)dst = *(uint64_t *)(dst + offset); dst += 8; }
    if (bytes & 4) { *(uint32_t *)dst = *(uint32_t *)(dst + offset); dst += 4; }
    if (bytes & 2) { *(uint16_t *)dst = *(uint16_t *)(dst + offset); dst += 2; }
    if (bytes & 1) { *( uint8_t *)dst = *( uint8_t *)(dst + offset); }
}
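/* Informal example: bytes = 45 = 32 + 8 + 4 + 1, so the routine above
 * moves a 32-byte chunk, then 8, 4 and 1 bytes; testing each power of
 * two once covers any length from 0 to 63 without a loop. */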

/* zero 0-15 bytes */
static void INLINE
poly1305_block_zero(uint8_t *dst, size_t bytes) {
    if (bytes & 8) { *(uint64_t *)dst = 0; dst += 8; }
    if (bytes & 4) { *(uint32_t *)dst = 0; dst += 4; }
    if (bytes & 2) { *(uint16_t *)dst = 0; dst += 2; }
    if (bytes & 1) { *( uint8_t *)dst = 0; }
}

static size_t INLINE
poly1305_min(size_t a, size_t b) {
    return (a < b) ? a : b;
}

void
Poly1305Init(poly1305_state *state, const unsigned char key[32]) {
    poly1305_state_internal *st = poly1305_aligned_state(state);
    poly1305_power *p;
    uint64_t r0,r1,r2;
    uint64_t t0,t1;

    /* clamp key */
    t0 = U8TO64_LE(key + 0);
    t1 = U8TO64_LE(key + 8);
    r0 = t0 & 0xffc0fffffff; t0 >>= 44; t0 |= t1 << 20;
    r1 = t0 & 0xfffffc0ffff; t1 >>= 24;
    r2 = t1 & 0x00ffffffc0f;
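    /* Informal: the three masks above split r into 44/44/40-bit limbs with
     * the usual Poly1305 clamp already folded in (the top four bits of key
     * bytes 3, 7, 11 and 15 and the bottom two bits of bytes 4, 8 and 12
     * are cleared). */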

    /* store r in un-used space of st->P[1] */
    p = &st->P[1];
    p->R20.d[1] = (uint32_t)(r0);
    p->R20.d[3] = (uint32_t)(r0 >> 32);
    p->R21.d[1] = (uint32_t)(r1);
    p->R21.d[3] = (uint32_t)(r1 >> 32);
    p->R22.d[1] = (uint32_t)(r2);
    p->R22.d[3] = (uint32_t)(r2 >> 32);

    /* store pad */
    p->R23.d[1] = U8TO32_LE(key + 16);
    p->R23.d[3] = U8TO32_LE(key + 20);
    p->R24.d[1] = U8TO32_LE(key + 24);
    p->R24.d[3] = U8TO32_LE(key + 28);

    /* H = 0 */
    st->H[0] = _mm_setzero_si128();
    st->H[1] = _mm_setzero_si128();
    st->H[2] = _mm_setzero_si128();
    st->H[3] = _mm_setzero_si128();
    st->H[4] = _mm_setzero_si128();

    st->started = 0;
    st->leftover = 0;
}

static void
poly1305_first_block(poly1305_state_internal *st, const uint8_t *m) {
    const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
    const xmmi FIVE = _mm_load_si128((xmmi*)poly1305_x64_sse2_5);
    const xmmi HIBIT = _mm_load_si128((xmmi*)poly1305_x64_sse2_1shl128);
    xmmi T5,T6;
    poly1305_power *p;
    uint128_t d[3];
    uint64_t r0,r1,r2;
    uint64_t r20,r21,r22,s22;
    uint64_t pad0,pad1;
    uint64_t c;
    uint64_t i;

    /* pull out stored info */
    p = &st->P[1];

    r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1];
    r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1];
    r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1];
    pad0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1];
    pad1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1];

    /* compute powers r^2,r^4 */
    r20 = r0;
    r21 = r1;
    r22 = r2;
    for (i = 0; i < 2; i++) {
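        /* Informal: the limbs carry weights 2^0, 2^44 and 2^88, so the
         * cross term r1*r2 lands at weight 2^132 = 4 * 2^130, and
         * 2^130 = 5 (mod 2^130 - 5); s22 = r2 * 20 precomputes that
         * fold-down. */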
        s22 = r22 * (5 << 2);

        d[0] = add128(mul64x64_128(r20, r20), mul64x64_128(r21 * 2, s22));
        d[1] = add128(mul64x64_128(r22, s22), mul64x64_128(r20 * 2, r21));
        d[2] = add128(mul64x64_128(r21, r21), mul64x64_128(r22 * 2, r20));

        r20 = lo128(d[0]) & 0xfffffffffff; c = shr128(d[0], 44);
        d[1] = add128_64(d[1], c); r21 = lo128(d[1]) & 0xfffffffffff; c = shr128(d[1], 44);
        d[2] = add128_64(d[2], c); r22 = lo128(d[2]) & 0x3ffffffffff; c = shr128(d[2], 42);
        r20 += c * 5; c = (r20 >> 44); r20 = r20 & 0xfffffffffff;
        r21 += c;

        p->R20.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)(r20) & 0x3ffffff), _MM_SHUFFLE(1,0,1,0));
        p->R21.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r20 >> 26) | (r21 << 18)) & 0x3ffffff), _MM_SHUFFLE(1,0,1,0));
        p->R22.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r21 >> 8)) & 0x3ffffff), _MM_SHUFFLE(1,0,1,0));
        p->R23.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r21 >> 34) | (r22 << 10)) & 0x3ffffff), _MM_SHUFFLE(1,0,1,0));
        p->R24.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r22 >> 16))), _MM_SHUFFLE(1,0,1,0));
        p->S21.v = _mm_mul_epu32(p->R21.v, FIVE);
        p->S22.v = _mm_mul_epu32(p->R22.v, FIVE);
        p->S23.v = _mm_mul_epu32(p->R23.v, FIVE);
        p->S24.v = _mm_mul_epu32(p->R24.v, FIVE);
        p--;
    }

    /* put saved info back */
    p = &st->P[1];
    p->R20.d[1] = (uint32_t)(r0);
    p->R20.d[3] = (uint32_t)(r0 >> 32);
    p->R21.d[1] = (uint32_t)(r1);
    p->R21.d[3] = (uint32_t)(r1 >> 32);
    p->R22.d[1] = (uint32_t)(r2);
    p->R22.d[3] = (uint32_t)(r2 >> 32);
    p->R23.d[1] = (uint32_t)(pad0);
    p->R23.d[3] = (uint32_t)(pad0 >> 32);
    p->R24.d[1] = (uint32_t)(pad1);
    p->R24.d[3] = (uint32_t)(pad1 >> 32);

    /* H = [Mx,My] */
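    /* Informal: T5/T6 gather the low and high 64 bits of two 16-byte
     * blocks, one block per lane; five 26-bit limbs are cut at bit
     * offsets 0, 26, 52, 78 and 104. Limb 2 straddles the two 64-bit
     * halves, hence (T5 >> 52) | (T6 << 12); the OR with HIBIT sets the
     * 2^128 pad bit of each block. */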
    T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), _mm_loadl_epi64((xmmi *)(m + 16)));
    T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), _mm_loadl_epi64((xmmi *)(m + 24)));
    st->H[0] = _mm_and_si128(MMASK, T5);
    st->H[1] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
    T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
    st->H[2] = _mm_and_si128(MMASK, T5);
    st->H[3] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
    st->H[4] = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);
}

static void
poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, size_t bytes) {
    const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
    const xmmi FIVE = _mm_load_si128((xmmi*)poly1305_x64_sse2_5);
    const xmmi HIBIT = _mm_load_si128((xmmi*)poly1305_x64_sse2_1shl128);

    poly1305_power *p;
    xmmi H0,H1,H2,H3,H4;
    xmmi T0,T1,T2,T3,T4,T5,T6;
    xmmi M0,M1,M2,M3,M4;
    xmmi C1,C2;

    H0 = st->H[0];
    H1 = st->H[1];
    H2 = st->H[2];
    H3 = st->H[3];
    H4 = st->H[4];

    while (bytes >= 64) {
        /* H *= [r^4,r^4] */
        p = &st->P[0];
        T0 = _mm_mul_epu32(H0, p->R20.v);
        T1 = _mm_mul_epu32(H0, p->R21.v);
        T2 = _mm_mul_epu32(H0, p->R22.v);
        T3 = _mm_mul_epu32(H0, p->R23.v);
        T4 = _mm_mul_epu32(H0, p->R24.v);
        T5 = _mm_mul_epu32(H1, p->S24.v); T6 = _mm_mul_epu32(H1, p->R20.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
        T5 = _mm_mul_epu32(H2, p->S23.v); T6 = _mm_mul_epu32(H2, p->S24.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
        T5 = _mm_mul_epu32(H3, p->S22.v); T6 = _mm_mul_epu32(H3, p->S23.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
        T5 = _mm_mul_epu32(H4, p->S21.v); T6 = _mm_mul_epu32(H4, p->S22.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
        T5 = _mm_mul_epu32(H1, p->R21.v); T6 = _mm_mul_epu32(H1, p->R22.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
        T5 = _mm_mul_epu32(H2, p->R20.v); T6 = _mm_mul_epu32(H2, p->R21.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
        T5 = _mm_mul_epu32(H3, p->S24.v); T6 = _mm_mul_epu32(H3, p->R20.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
        T5 = _mm_mul_epu32(H4, p->S23.v); T6 = _mm_mul_epu32(H4, p->S24.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
        T5 = _mm_mul_epu32(H1, p->R23.v); T4 = _mm_add_epi64(T4, T5);
        T5 = _mm_mul_epu32(H2, p->R22.v); T4 = _mm_add_epi64(T4, T5);
        T5 = _mm_mul_epu32(H3, p->R21.v); T4 = _mm_add_epi64(T4, T5);
        T5 = _mm_mul_epu32(H4, p->R20.v); T4 = _mm_add_epi64(T4, T5);

        /* H += [Mx,My]*[r^2,r^2] */
        T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), _mm_loadl_epi64((xmmi *)(m + 16)));
        T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), _mm_loadl_epi64((xmmi *)(m + 24)));
        M0 = _mm_and_si128(MMASK, T5);
        M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
        T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
        M2 = _mm_and_si128(MMASK, T5);
        M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
        M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);

        p = &st->P[1];
        T5 = _mm_mul_epu32(M0, p->R20.v); T6 = _mm_mul_epu32(M0, p->R21.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
        T5 = _mm_mul_epu32(M1, p->S24.v); T6 = _mm_mul_epu32(M1, p->R20.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
        T5 = _mm_mul_epu32(M2, p->S23.v); T6 = _mm_mul_epu32(M2, p->S24.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
        T5 = _mm_mul_epu32(M3, p->S22.v); T6 = _mm_mul_epu32(M3, p->S23.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
        T5 = _mm_mul_epu32(M4, p->S21.v); T6 = _mm_mul_epu32(M4, p->S22.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
        T5 = _mm_mul_epu32(M0, p->R22.v); T6 = _mm_mul_epu32(M0, p->R23.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
        T5 = _mm_mul_epu32(M1, p->R21.v); T6 = _mm_mul_epu32(M1, p->R22.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
        T5 = _mm_mul_epu32(M2, p->R20.v); T6 = _mm_mul_epu32(M2, p->R21.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
        T5 = _mm_mul_epu32(M3, p->S24.v); T6 = _mm_mul_epu32(M3, p->R20.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
        T5 = _mm_mul_epu32(M4, p->S23.v); T6 = _mm_mul_epu32(M4, p->S24.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
        T5 = _mm_mul_epu32(M0, p->R24.v); T4 = _mm_add_epi64(T4, T5);
        T5 = _mm_mul_epu32(M1, p->R23.v); T4 = _mm_add_epi64(T4, T5);
        T5 = _mm_mul_epu32(M2, p->R22.v); T4 = _mm_add_epi64(T4, T5);
        T5 = _mm_mul_epu32(M3, p->R21.v); T4 = _mm_add_epi64(T4, T5);
        T5 = _mm_mul_epu32(M4, p->R20.v); T4 = _mm_add_epi64(T4, T5);

        /* H += [Mx,My] */
        T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 32)), _mm_loadl_epi64((xmmi *)(m + 48)));
        T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 40)), _mm_loadl_epi64((xmmi *)(m + 56)));
        M0 = _mm_and_si128(MMASK, T5);
        M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
        T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
        M2 = _mm_and_si128(MMASK, T5);
        M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
        M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);

        T0 = _mm_add_epi64(T0, M0);
        T1 = _mm_add_epi64(T1, M1);
        T2 = _mm_add_epi64(T2, M2);
        T3 = _mm_add_epi64(T3, M3);
        T4 = _mm_add_epi64(T4, M4);

        /* reduce */
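        /* Informal: after the multiply-adds each limb can exceed 26 bits,
         * so carries run 0->1, 1->2, 2->3, 3->4; a carry out of limb 4
         * has weight 2^130 = 5 (mod 2^130 - 5), which is the
         * _mm_mul_epu32(C2, FIVE) fold back into limb 0. */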
        C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_and_si128(T3, MMASK); T1 = _mm_add_epi64(T1, C1); T4 = _mm_add_epi64(T4, C2);
        C1 = _mm_srli_epi64(T1, 26); C2 = _mm_srli_epi64(T4, 26); T1 = _mm_and_si128(T1, MMASK); T4 = _mm_and_si128(T4, MMASK); T2 = _mm_add_epi64(T2, C1); T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
        C1 = _mm_srli_epi64(T2, 26); C2 = _mm_srli_epi64(T0, 26); T2 = _mm_and_si128(T2, MMASK); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_add_epi64(T3, C1); T1 = _mm_add_epi64(T1, C2);
        C1 = _mm_srli_epi64(T3, 26); T3 = _mm_and_si128(T3, MMASK); T4 = _mm_add_epi64(T4, C1);

        /* H = (H*[r^4,r^4] + [Mx,My]*[r^2,r^2] + [Mx,My]) */
        H0 = T0;
        H1 = T1;
        H2 = T2;
        H3 = T3;
        H4 = T4;

        m += 64;
        bytes -= 64;
    }

    st->H[0] = H0;
    st->H[1] = H1;
    st->H[2] = H2;
    st->H[3] = H3;
    st->H[4] = H4;
}

static size_t
poly1305_combine(poly1305_state_internal *st, const uint8_t *m, size_t bytes) {
    const xmmi MMASK = _mm_load_si128((xmmi *)poly1305_x64_sse2_message_mask);
    const xmmi HIBIT = _mm_load_si128((xmmi*)poly1305_x64_sse2_1shl128);
    const xmmi FIVE = _mm_load_si128((xmmi*)poly1305_x64_sse2_5);

    poly1305_power *p;
    xmmi H0,H1,H2,H3,H4;
    xmmi M0,M1,M2,M3,M4;
    xmmi T0,T1,T2,T3,T4,T5,T6;
    xmmi C1,C2;

    uint64_t r0,r1,r2;
    uint64_t t0,t1,t2,t3,t4;
    uint64_t c;
    size_t consumed = 0;

    H0 = st->H[0];
    H1 = st->H[1];
    H2 = st->H[2];
    H3 = st->H[3];
    H4 = st->H[4];

    /* p = [r^2,r^2] */
    p = &st->P[1];

    if (bytes >= 32) {
        /* H *= [r^2,r^2] */
        T0 = _mm_mul_epu32(H0, p->R20.v);
        T1 = _mm_mul_epu32(H0, p->R21.v);
        T2 = _mm_mul_epu32(H0, p->R22.v);
        T3 = _mm_mul_epu32(H0, p->R23.v);
        T4 = _mm_mul_epu32(H0, p->R24.v);
        T5 = _mm_mul_epu32(H1, p->S24.v); T6 = _mm_mul_epu32(H1, p->R20.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
        T5 = _mm_mul_epu32(H2, p->S23.v); T6 = _mm_mul_epu32(H2, p->S24.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
        T5 = _mm_mul_epu32(H3, p->S22.v); T6 = _mm_mul_epu32(H3, p->S23.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
        T5 = _mm_mul_epu32(H4, p->S21.v); T6 = _mm_mul_epu32(H4, p->S22.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
        T5 = _mm_mul_epu32(H1, p->R21.v); T6 = _mm_mul_epu32(H1, p->R22.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
        T5 = _mm_mul_epu32(H2, p->R20.v); T6 = _mm_mul_epu32(H2, p->R21.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
        T5 = _mm_mul_epu32(H3, p->S24.v); T6 = _mm_mul_epu32(H3, p->R20.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
        T5 = _mm_mul_epu32(H4, p->S23.v); T6 = _mm_mul_epu32(H4, p->S24.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
        T5 = _mm_mul_epu32(H1, p->R23.v); T4 = _mm_add_epi64(T4, T5);
        T5 = _mm_mul_epu32(H2, p->R22.v); T4 = _mm_add_epi64(T4, T5);
        T5 = _mm_mul_epu32(H3, p->R21.v); T4 = _mm_add_epi64(T4, T5);
        T5 = _mm_mul_epu32(H4, p->R20.v); T4 = _mm_add_epi64(T4, T5);

        /* H += [Mx,My] */
        T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 0)), _mm_loadl_epi64((xmmi *)(m + 16)));
        T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((xmmi *)(m + 8)), _mm_loadl_epi64((xmmi *)(m + 24)));
        M0 = _mm_and_si128(MMASK, T5);
        M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
        T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
        M2 = _mm_and_si128(MMASK, T5);
        M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
        M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);

        T0 = _mm_add_epi64(T0, M0);
        T1 = _mm_add_epi64(T1, M1);
        T2 = _mm_add_epi64(T2, M2);
        T3 = _mm_add_epi64(T3, M3);
        T4 = _mm_add_epi64(T4, M4);

        /* reduce */
        C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_and_si128(T3, MMASK); T1 = _mm_add_epi64(T1, C1); T4 = _mm_add_epi64(T4, C2);
        C1 = _mm_srli_epi64(T1, 26); C2 = _mm_srli_epi64(T4, 26); T1 = _mm_and_si128(T1, MMASK); T4 = _mm_and_si128(T4, MMASK); T2 = _mm_add_epi64(T2, C1); T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
        C1 = _mm_srli_epi64(T2, 26); C2 = _mm_srli_epi64(T0, 26); T2 = _mm_and_si128(T2, MMASK); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_add_epi64(T3, C1); T1 = _mm_add_epi64(T1, C2);
        C1 = _mm_srli_epi64(T3, 26); T3 = _mm_and_si128(T3, MMASK); T4 = _mm_add_epi64(T4, C1);

        /* H = (H*[r^2,r^2] + [Mx,My]) */
        H0 = T0;
        H1 = T1;
        H2 = T2;
        H3 = T3;
        H4 = T4;

        consumed = 32;
    }

    /* finalize, H *= [r^2,r] */
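    /* Informal: P[1] holds [r^2, r^2] in its two even 32-bit elements;
     * the d[2] stores below overwrite the upper lane with r itself, so
     * the multiply computes H *= [r^2, r] and the lane sum afterwards
     * yields lane_x*r^2 + lane_y*r, matching the serial result. */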
    r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1];
    r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1];
    r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1];

    p->R20.d[2] = (uint32_t)(r0) & 0x3ffffff;
    p->R21.d[2] = (uint32_t)((r0 >> 26) | (r1 << 18)) & 0x3ffffff;
    p->R22.d[2] = (uint32_t)((r1 >> 8)) & 0x3ffffff;
    p->R23.d[2] = (uint32_t)((r1 >> 34) | (r2 << 10)) & 0x3ffffff;
    p->R24.d[2] = (uint32_t)((r2 >> 16));
    p->S21.d[2] = p->R21.d[2] * 5;
    p->S22.d[2] = p->R22.d[2] * 5;
    p->S23.d[2] = p->R23.d[2] * 5;
    p->S24.d[2] = p->R24.d[2] * 5;

    /* H *= [r^2,r] */
    T0 = _mm_mul_epu32(H0, p->R20.v);
    T1 = _mm_mul_epu32(H0, p->R21.v);
    T2 = _mm_mul_epu32(H0, p->R22.v);
    T3 = _mm_mul_epu32(H0, p->R23.v);
    T4 = _mm_mul_epu32(H0, p->R24.v);
    T5 = _mm_mul_epu32(H1, p->S24.v); T6 = _mm_mul_epu32(H1, p->R20.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H2, p->S23.v); T6 = _mm_mul_epu32(H2, p->S24.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H3, p->S22.v); T6 = _mm_mul_epu32(H3, p->S23.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H4, p->S21.v); T6 = _mm_mul_epu32(H4, p->S22.v); T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H1, p->R21.v); T6 = _mm_mul_epu32(H1, p->R22.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H2, p->R20.v); T6 = _mm_mul_epu32(H2, p->R21.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H3, p->S24.v); T6 = _mm_mul_epu32(H3, p->R20.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H4, p->S23.v); T6 = _mm_mul_epu32(H4, p->S24.v); T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H1, p->R23.v); T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H2, p->R22.v); T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H3, p->R21.v); T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H4, p->R20.v); T4 = _mm_add_epi64(T4, T5);

    C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_and_si128(T3, MMASK); T1 = _mm_add_epi64(T1, C1); T4 = _mm_add_epi64(T4, C2);
    C1 = _mm_srli_epi64(T1, 26); C2 = _mm_srli_epi64(T4, 26); T1 = _mm_and_si128(T1, MMASK); T4 = _mm_and_si128(T4, MMASK); T2 = _mm_add_epi64(T2, C1); T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
    C1 = _mm_srli_epi64(T2, 26); C2 = _mm_srli_epi64(T0, 26); T2 = _mm_and_si128(T2, MMASK); T0 = _mm_and_si128(T0, MMASK); T3 = _mm_add_epi64(T3, C1); T1 = _mm_add_epi64(T1, C2);
    C1 = _mm_srli_epi64(T3, 26); T3 = _mm_and_si128(T3, MMASK); T4 = _mm_add_epi64(T4, C1);

    /* H = H[0]+H[1] */
    H0 = _mm_add_epi64(T0, _mm_srli_si128(T0, 8));
    H1 = _mm_add_epi64(T1, _mm_srli_si128(T1, 8));
    H2 = _mm_add_epi64(T2, _mm_srli_si128(T2, 8));
    H3 = _mm_add_epi64(T3, _mm_srli_si128(T3, 8));
    H4 = _mm_add_epi64(T4, _mm_srli_si128(T4, 8));

    t0 = _mm_cvtsi128_si32(H0); c = (t0 >> 26); t0 &= 0x3ffffff;
    t1 = _mm_cvtsi128_si32(H1) + c; c = (t1 >> 26); t1 &= 0x3ffffff;
    t2 = _mm_cvtsi128_si32(H2) + c; c = (t2 >> 26); t2 &= 0x3ffffff;
    t3 = _mm_cvtsi128_si32(H3) + c; c = (t3 >> 26); t3 &= 0x3ffffff;
    t4 = _mm_cvtsi128_si32(H4) + c; c = (t4 >> 26); t4 &= 0x3ffffff;
    t0 = t0 + (c * 5); c = (t0 >> 26); t0 &= 0x3ffffff;
    t1 = t1 + c;

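    /* Informal: repack the five 26-bit limbs t0..t4 (bit offsets 0, 26,
     * 52, 78, 104) into the 44/44/42-bit limbs used by the scalar finish;
     * e.g. HH[1] covers bits 44..87 = (t1 >> 18) | (t2 << 8) | (t3 << 34). */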
    st->HH[0] = ((t0) | (t1 << 26)) & 0xfffffffffffull;
    st->HH[1] = ((t1 >> 18) | (t2 << 8) | (t3 << 34)) & 0xfffffffffffull;
    st->HH[2] = ((t3 >> 10) | (t4 << 16)) & 0x3ffffffffffull;

    return consumed;
}

void
Poly1305Update(poly1305_state *state, const unsigned char *m, size_t bytes) {
    poly1305_state_internal *st = poly1305_aligned_state(state);
    size_t want;

    /* need at least 32 initial bytes to start the accelerated branch */
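    /* Informal: poly1305_first_block seeds both SIMD lanes at once, so it
     * always consumes exactly two 16-byte blocks (32 bytes); shorter
     * messages stay buffered and Poly1305Finish handles them on the
     * scalar path with HH still zero. */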
    if (!st->started) {
        if ((st->leftover == 0) && (bytes > 32)) {
            poly1305_first_block(st, m);
            m += 32;
            bytes -= 32;
        } else {
            want = poly1305_min(32 - st->leftover, bytes);
            poly1305_block_copy(st->buffer + st->leftover, m, want);
            bytes -= want;
            m += want;
            st->leftover += want;
            if ((st->leftover < 32) || (bytes == 0))
                return;
            poly1305_first_block(st, st->buffer);
            st->leftover = 0;
        }
        st->started = 1;
    }

    /* handle leftover */
    if (st->leftover) {
        want = poly1305_min(64 - st->leftover, bytes);
        poly1305_block_copy(st->buffer + st->leftover, m, want);
        bytes -= want;
        m += want;
        st->leftover += want;
        if (st->leftover < 64)
            return;
        poly1305_blocks(st, st->buffer, 64);
        st->leftover = 0;
    }

    /* process 64 byte blocks */
    if (bytes >= 64) {
        want = (bytes & ~63);
        poly1305_blocks(st, m, want);
        m += want;
        bytes -= want;
    }

    if (bytes) {
        poly1305_block_copy(st->buffer + st->leftover, m, bytes);
        st->leftover += bytes;
    }
}

void
Poly1305Finish(poly1305_state *state, unsigned char mac[16]) {
    poly1305_state_internal *st = poly1305_aligned_state(state);
    size_t leftover = st->leftover;
    uint8_t *m = st->buffer;
    uint128_t d[3];
    uint64_t h0,h1,h2;
    uint64_t t0,t1;
    uint64_t g0,g1,g2,c,nc;
    uint64_t r0,r1,r2,s1,s2;
    poly1305_power *p;

    if (st->started) {
        size_t consumed = poly1305_combine(st, m, leftover);
        leftover -= consumed;
        m += consumed;
    }

    /* st->HH will either be 0 or have the combined result */
    h0 = st->HH[0];
    h1 = st->HH[1];
    h2 = st->HH[2];

    p = &st->P[1];
    r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1];
    r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1];
    r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1];
    s1 = r1 * (5 << 2);
    s2 = r2 * (5 << 2);

    if (leftover < 16)
        goto poly1305_donna_atmost15bytes;

poly1305_donna_atleast16bytes:
    t0 = U8TO64_LE(m + 0);
    t1 = U8TO64_LE(m + 8);
    h0 += t0 & 0xfffffffffff;
    t0 = shr128_pair(t1, t0, 44);
    h1 += t0 & 0xfffffffffff;
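    /* Informal: in radix 2^44 the 2^128 pad bit of a full 16-byte block
     * is bit 128 - 88 = 40 of limb 2, hence the ((uint64_t)1 << 40) below;
     * the final partial block instead gets a 0x01 byte appended and no
     * high bit. */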
    h2 += (t1 >> 24) | ((uint64_t)1 << 40);

poly1305_donna_mul:
    d[0] = add128(add128(mul64x64_128(h0, r0), mul64x64_128(h1, s2)), mul64x64_128(h2, s1));
    d[1] = add128(add128(mul64x64_128(h0, r1), mul64x64_128(h1, r0)), mul64x64_128(h2, s2));
    d[2] = add128(add128(mul64x64_128(h0, r2), mul64x64_128(h1, r1)), mul64x64_128(h2, r0));
    h0 = lo128(d[0]) & 0xfffffffffff; c = shr128(d[0], 44);
    d[1] = add128_64(d[1], c); h1 = lo128(d[1]) & 0xfffffffffff; c = shr128(d[1], 44);
    d[2] = add128_64(d[2], c); h2 = lo128(d[2]) & 0x3ffffffffff; c = shr128(d[2], 42);
    h0 += c * 5;

    m += 16;
    leftover -= 16;
    if (leftover >= 16) goto poly1305_donna_atleast16bytes;

    /* final bytes */
poly1305_donna_atmost15bytes:
    if (!leftover) goto poly1305_donna_finish;

    m[leftover++] = 1;
    poly1305_block_zero(m + leftover, 16 - leftover);
    leftover = 16;

    t0 = U8TO64_LE(m+0);
    t1 = U8TO64_LE(m+8);
    h0 += t0 & 0xfffffffffff; t0 = shr128_pair(t1, t0, 44);
    h1 += t0 & 0xfffffffffff;
    h2 += (t1 >> 24);

    goto poly1305_donna_mul;

poly1305_donna_finish:
    c = (h0 >> 44); h0 &= 0xfffffffffff;
    h1 += c; c = (h1 >> 44); h1 &= 0xfffffffffff;
    h2 += c; c = (h2 >> 42); h2 &= 0x3ffffffffff;
    h0 += c * 5;

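    /* Informal: g = h + 5 - 2^130 (limb 2 carries weight 2^88, so 2^130
     * is bit 42 there). If h >= 2^130 - 5 the subtraction does not
     * borrow, bit 63 of g2 stays clear and c = (g2 >> 63) - 1 becomes
     * all-ones, selecting g; otherwise c is 0 and h is kept. */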
    g0 = h0 + 5; c = (g0 >> 44); g0 &= 0xfffffffffff;
    g1 = h1 + c; c = (g1 >> 44); g1 &= 0xfffffffffff;
    g2 = h2 + c - ((uint64_t)1 << 42);

    c = (g2 >> 63) - 1;
    nc = ~c;
    h0 = (h0 & nc) | (g0 & c);
    h1 = (h1 & nc) | (g1 & c);
    h2 = (h2 & nc) | (g2 & c);

    /* pad */
    t0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1];
    t1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1];
    h0 += (t0 & 0xfffffffffff); c = (h0 >> 44); h0 &= 0xfffffffffff; t0 = shr128_pair(t1, t0, 44);
    h1 += (t0 & 0xfffffffffff) + c; c = (h1 >> 44); h1 &= 0xfffffffffff; t1 = (t1 >> 24);
    h2 += (t1) + c;

    U64TO8_LE(mac + 0, ((h0) | (h1 << 44)));
    U64TO8_LE(mac + 8, ((h1 >> 20) | (h2 << 24)));
}