/*
 * Shared glue code for 128bit block ciphers, AVX assembler macros
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define load_8way(src, x0, x1, x2, x3, x4, x5, x6, x7) \
	vmovdqu (0*16)(src), x0; \
	vmovdqu (1*16)(src), x1; \
	vmovdqu (2*16)(src), x2; \
	vmovdqu (3*16)(src), x3; \
	vmovdqu (4*16)(src), x4; \
	vmovdqu (5*16)(src), x5; \
	vmovdqu (6*16)(src), x6; \
	vmovdqu (7*16)(src), x7;

#define store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vmovdqu x0, (0*16)(dst); \
	vmovdqu x1, (1*16)(dst); \
	vmovdqu x2, (2*16)(dst); \
	vmovdqu x3, (3*16)(dst); \
	vmovdqu x4, (4*16)(dst); \
	vmovdqu x5, (5*16)(dst); \
	vmovdqu x6, (6*16)(dst); \
	vmovdqu x7, (7*16)(dst);

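/*
 * Usage sketch (illustrative only, not part of this header): a cipher's
 * own .S file can wrap an 8-way block function with these macros. The
 * routine name and the __cipher_enc8 body below are hypothetical; the
 * register roles follow the x86-64 calling convention used by the
 * in-tree users (rdi: ctx, rsi: dst, rdx: src):
 *
 *	ENTRY(cipher_ecb_enc_8way)
 *		load_8way(%rdx, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5,
 *			  %xmm6, %xmm7);
 *		__cipher_enc8;
 *		store_8way(%rsi, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5,
 *			   %xmm6, %xmm7);
 *		ret;
 *	ENDPROC(cipher_ecb_enc_8way)
 */
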
#define store_cbc_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vpxor (0*16)(src), x1, x1; \
	vpxor (1*16)(src), x2, x2; \
	vpxor (2*16)(src), x3, x3; \
	vpxor (3*16)(src), x4, x4; \
	vpxor (4*16)(src), x5, x5; \
	vpxor (5*16)(src), x6, x6; \
	vpxor (6*16)(src), x7, x7; \
	store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);

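/*
 * Note: store_cbc_8way implements the final xor of CBC decryption for an
 * 8-block batch; each decrypted block x1..x7 is xored with the previous
 * ciphertext block read back from src. x0 is stored as-is because its
 * predecessor (the IV, or the last ciphertext block of the previous
 * batch) is not part of this batch; the C glue code applies that xor.
 */
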
#define inc_le128(x, minus_one, tmp) \
	vpcmpeqq minus_one, x, tmp; \
	vpsubq minus_one, x, x; \
	vpslldq $8, tmp, tmp; \
	vpsubq tmp, x, x;

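/*
 * inc_le128 performs a 128-bit little-endian increment using only 64-bit
 * SIMD operations. minus_one must hold { low: -1, high: 0 } (as set up
 * by load_ctr_8way). vpcmpeqq first records, per qword, whether the low
 * qword is all-ones and about to wrap; vpsubq then subtracts -1 from the
 * low qword only, i.e. adds 1 to the low 64 bits. vpslldq shifts the
 * recorded wrap mask into the high qword (discarding the meaningless
 * high-lane compare), and the final vpsubq adds 1 to the high qword
 * exactly when the low qword wrapped, propagating the carry.
 */
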
#define load_ctr_8way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2) \
	vpcmpeqd t0, t0, t0; \
	vpsrldq $8, t0, t0; /* low: -1, high: 0 */ \
	vmovdqa bswap, t1; \
	\
	/* load IV and byteswap */ \
	vmovdqu (iv), x7; \
	vpshufb t1, x7, x0; \
	\
	/* construct IVs */ \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x1; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x2; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x3; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x4; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x5; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x6; \
	inc_le128(x7, t0, t2); \
	vmovdqa x7, t2; \
	vpshufb t1, x7, x7; \
	inc_le128(t2, t0, t1); \
	vmovdqu t2, (iv);

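/*
 * load_ctr_8way keeps the running counter in x7 as a little-endian
 * 128-bit integer so inc_le128 stays cheap, and uses the bswap shuffle
 * mask to emit each of the eight counter blocks x0..x7 in the big-endian
 * byte order CTR mode is defined over. A ninth increment is written back
 * to (iv) so the next batch continues where this one left off.
 */
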
#define store_ctr_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vpxor (0*16)(src), x0, x0; \
	vpxor (1*16)(src), x1, x1; \
	vpxor (2*16)(src), x2, x2; \
	vpxor (3*16)(src), x3, x3; \
	vpxor (4*16)(src), x4, x4; \
	vpxor (5*16)(src), x5, x5; \
	vpxor (6*16)(src), x6, x6; \
	vpxor (7*16)(src), x7, x7; \
	store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);

#define gf128mul_x_ble(iv, mask, tmp) \
	vpsrad $31, iv, tmp; \
	vpaddq iv, iv, iv; \
	vpshufd $0x13, tmp, tmp; \
	vpand mask, tmp, tmp; \
	vpxor tmp, iv, iv;

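/*
 * gf128mul_x_ble multiplies the 128-bit XTS tweak in iv by x in
 * GF(2^128), using the little-endian block convention ("ble"). vpaddq
 * doubles both 64-bit halves; the sign masks built by vpsrad/vpshufd and
 * filtered through `mask` then patch up what qword addition cannot do:
 * bit 63 is carried into bit 64, and overflow out of bit 127 is reduced
 * by xoring 0x87 into the low byte (the x^128 + x^7 + x^2 + x + 1
 * reduction polynomial). Callers provide the constant; in the in-tree
 * users it is defined along the lines of (shown here for reference):
 *
 *	.Lxts_gf128mul_and_shl1_mask:
 *		.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 0x01, 0, 0, 0, 0, 0, 0, 0
 */
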
#define load_xts_8way(iv, src, dst, x0, x1, x2, x3, x4, x5, x6, x7, tiv, t0, \
		      t1, xts_gf128mul_and_shl1_mask) \
	vmovdqa xts_gf128mul_and_shl1_mask, t0; \
	\
	/* load first tweak */ \
	vmovdqu (iv), tiv; \
	vpxor (0*16)(src), tiv, x0; \
	vmovdqu tiv, (0*16)(dst); \
	\
	/* construct and store IVs, also xor with source */ \
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (1*16)(src), tiv, x1; \
	vmovdqu tiv, (1*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (2*16)(src), tiv, x2; \
	vmovdqu tiv, (2*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (3*16)(src), tiv, x3; \
	vmovdqu tiv, (3*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (4*16)(src), tiv, x4; \
	vmovdqu tiv, (4*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (5*16)(src), tiv, x5; \
	vmovdqu tiv, (5*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (6*16)(src), tiv, x6; \
	vmovdqu tiv, (6*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (7*16)(src), tiv, x7; \
	vmovdqu tiv, (7*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vmovdqu tiv, (iv);

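/*
 * Note: load_xts_8way leaves the eight per-block tweaks in dst while the
 * tweak-xored source blocks sit in x0..x7 for the cipher; store_xts_8way
 * below re-reads those tweaks from dst for the second xor. The ninth
 * tweak is written back to (iv) for the next batch.
 */
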
#define store_xts_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vpxor (0*16)(dst), x0, x0; \
	vpxor (1*16)(dst), x1, x1; \
	vpxor (2*16)(dst), x2, x2; \
	vpxor (3*16)(dst), x3, x3; \
	vpxor (4*16)(dst), x4, x4; \
	vpxor (5*16)(dst), x5, x5; \
	vpxor (6*16)(dst), x6, x6; \
	vpxor (7*16)(dst), x7, x7; \
	store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);

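/*
 * store_xts_8way completes XTS by xoring the en/decrypted blocks with
 * the per-block tweaks that load_xts_8way parked in dst, then replaces
 * dst with the final output.
 */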