/*
 * Copyright (C) 2012 Intel Corporation
 * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
/*
 * AArch32 does not provide this intrinsic natively because it does not
 * implement the underlying instruction. AArch32 only provides a 64-bit
 * wide vtbl.8 instruction, so use that instead.
 */
19 static uint8x16_t vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
26 return vcombine_u8(vtbl2_u8(__a.pair, vget_low_u8(b)),
27 vtbl2_u8(__a.pair, vget_high_u8(b)));
31 void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
32 uint8_t *dq, const uint8_t *pbmul,
35 uint8x16_t pm0 = vld1q_u8(pbmul);
36 uint8x16_t pm1 = vld1q_u8(pbmul + 16);
37 uint8x16_t qm0 = vld1q_u8(qmul);
38 uint8x16_t qm1 = vld1q_u8(qmul + 16);
39 uint8x16_t x0f = vdupq_n_u8(0x0f);
46 * qx = qmul[*q ^ *dq];
47 * *dq++ = db = pbmul[px] ^ qx;
54 uint8x16_t vx, vy, px, qx, db;
56 px = veorq_u8(vld1q_u8(p), vld1q_u8(dp));
57 vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
59 vy = (uint8x16_t)vshrq_n_s16((int16x8_t)vx, 4);
60 vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
61 vy = vqtbl1q_u8(qm1, vandq_u8(vy, x0f));
62 qx = veorq_u8(vx, vy);
64 vy = (uint8x16_t)vshrq_n_s16((int16x8_t)px, 4);
65 vx = vqtbl1q_u8(pm0, vandq_u8(px, x0f));
66 vy = vqtbl1q_u8(pm1, vandq_u8(vy, x0f));
67 vx = veorq_u8(vx, vy);
68 db = veorq_u8(vx, qx);
71 vst1q_u8(dp, veorq_u8(db, px));
81 void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
84 uint8x16_t qm0 = vld1q_u8(qmul);
85 uint8x16_t qm1 = vld1q_u8(qmul + 16);
86 uint8x16_t x0f = vdupq_n_u8(0x0f);
90 * *p++ ^= *dq = qmul[*q ^ *dq];
98 vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
100 vy = (uint8x16_t)vshrq_n_s16((int16x8_t)vx, 4);
101 vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
102 vy = vqtbl1q_u8(qm1, vandq_u8(vy, x0f));
103 vx = veorq_u8(vx, vy);
104 vy = veorq_u8(vx, vld1q_u8(p));