GNU Linux-libre 4.9.301-gnu1
crypto/async_tx/async_pq.c
/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])

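/* Illustration (a hypothetical caller, not part of the original file):
 * how a six-disk RAID-6 user would lay out blocks[] before calling
 * async_gen_syndrome().  The page names d0..d3, p and q are assumptions
 * for the sketch:
 *
 *	struct page *blocks[6];
 *
 *	blocks[0] = d0;		// data disks occupy 0..disks-3
 *	blocks[1] = d1;
 *	blocks[2] = d2;
 *	blocks[3] = d3;
 *	blocks[4] = p;		// P(blocks, 6) == blocks[4]
 *	blocks[5] = q;		// Q(blocks, 6) == blocks[5]
 *
 * Because P() and Q() expand to array elements they can also be
 * assigned to, e.g. "P(blocks, 6) = NULL" omits the P calculation.
 */
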
/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
		      const unsigned char *scfs, int disks,
		      struct dmaengine_unmap_data *unmap,
		      enum dma_ctrl_flags dma_flags,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* Drivers force forward progress in case they cannot provide
		 * a descriptor
		 */
		for (;;) {
			dma_dest[0] = unmap->addr[disks - 2];
			dma_dest[1] = unmap->addr[disks - 1];
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &unmap->addr[src_off],
						     pq_src_cnt,
						     &scfs[src_off], unmap->len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}

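/* Worked sketch of the chunking above (the numbers are assumptions, not
 * from this file): with 16 sources and an engine whose dma_maxpq(dma, 0)
 * is 8, the first pass programs sources 0..7 with the chain left open and
 * the callback suppressed.  Every later pass carries DMA_PREP_CONTINUE so
 * the engine folds the intermediate P/Q back in; on hardware without
 * native continuation support dma_maxpq() shrinks accordingly (e.g. to
 * 8 - 3 = 5 when both P and Q stay enabled), so the remaining 8 sources
 * may take two more passes rather than one.  Only the final pass restores
 * the caller's callback and ASYNC_TX_ACK.
 */
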
/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;
	int start = -1, stop = disks - 3;

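	/* the raid6 library routines want an array of mapped source
	 * addresses; a caller-provided scribble buffer preserves blocks[],
	 * otherwise blocks[] itself is reused in place to hold the
	 * addresses, clobbering the caller's page pointers
	 */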
	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = (void *) raid6_empty_zero_page;
		} else {
			srcs[i] = page_address(blocks[i]) + offset;
			if (i < disks - 2) {
				stop = i;
				if (start == -1)
					start = i;
			}
		}
	}
	if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
		BUG_ON(!raid6_call.xor_syndrome);
		if (start >= 0)
			raid6_call.xor_syndrome(disks, start, stop, len, srcs);
	} else
		raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}

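/* For reference (standard RAID-6 math, restating the GF(2^8) note in the
 * kernel-doc below): gen_syndrome computes, for data blocks D_0..D_{n-1},
 *
 *	P = D_0 + D_1 + ... + D_{n-1}
 *	Q = g^0.D_0 + g^1.D_1 + ... + g^{n-1}.D_{n-1}
 *
 * where + is byte-wise XOR (GF(2^8) addition), . is GF(2^8)
 * multiplication modulo the primitive polynomial 0x11d, and g = {02}.
 * The g^i coefficients are the raid6_gfexp[i] values used on the
 * hardware-offload path below.
 */
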
/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the raid6_zero_page
 * in the synchronous path and omitted in the hardware-asynchronous
 * path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	/* XORing P/Q is only implemented in software, so take the
	 * synchronous path for ASYNC_TX_PQ_XOR_DST; otherwise offload when
	 * the device can take all sources at once or supports continuation
	 */
	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = 0;
		unsigned char coefs[src_cnt];
		int i, j;

		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* convert source addresses being careful to collapse 'empty'
		 * sources and update the coefficients accordingly
		 */
		unmap->len = len;
		for (i = 0, j = 0; i < src_cnt; i++) {
			if (blocks[i] == NULL)
				continue;
			unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
						      len, DMA_TO_DEVICE);
			coefs[j] = raid6_gfexp[i];
			unmap->to_cnt++;
			j++;
		}

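		/* Illustration: with blocks[] = {D0, NULL, D2, D3} the loop
		 * above maps three pages and produces coefs[] =
		 * {g^0, g^2, g^3}, so the skipped disk still contributes its
		 * correct power of g to Q.
		 */
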
		/*
		 * DMAs use destinations as sources,
		 * so use BIDIRECTIONAL mapping
		 */
		unmap->bidi_cnt++;
		if (P(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
							offset, len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		}

		unmap->bidi_cnt++;
		if (Q(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
							offset, len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		}

		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
		dmaengine_unmap_put(unmap);
		return tx;
	}

	dmaengine_unmap_put(unmap);

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	do_sync_gen_syndrome(blocks, offset, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);

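/* Usage sketch (hypothetical caller, not from this file): generate P and
 * Q for a six-disk stripe and get a callback on completion.  stripe_done,
 * ctx and the page pointers are assumptions for the example:
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *	struct page *blocks[6] = { d0, d1, d2, d3, p, q };
 *	addr_conv_t addr_conv[6];
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, stripe_done, ctx,
 *			  addr_conv);
 *	tx = async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
 *
 * A NULL return means the syndrome was computed synchronously and
 * stripe_done has already run; otherwise completion is signalled through
 * the returned descriptor.
 */
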
static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
	#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
				     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission/completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks' and
 * 'disks' parameters of this routine.  The synchronous path
 * requires a temporary result buffer and submit->scribble to be
 * specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[disks-2];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks < 4);

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	if (unmap && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct device *dev = device->dev;
		dma_addr_t pq[2];
		int i, j = 0, src_cnt = 0;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		unmap->len = len;
		for (i = 0; i < disks-2; i++)
			if (likely(blocks[i])) {
				unmap->addr[j] = dma_map_page(dev, blocks[i],
							      offset, len,
							      DMA_TO_DEVICE);
				coefs[j] = raid6_gfexp[i];
				unmap->to_cnt++;
				src_cnt++;
				j++;
			}

		if (!P(blocks, disks)) {
			pq[0] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		} else {
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[0];
			unmap->to_cnt++;
		}
		if (!Q(blocks, disks)) {
			pq[1] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		} else {
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[1];
			unmap->to_cnt++;
		}

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq,
							    unmap->addr,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
	} else {
		struct page *p_src = P(blocks, disks);
		struct page *q_src = Q(blocks, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		Q(blocks, disks) = q_src;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);
		tx = NULL;
	}
	dmaengine_unmap_put(unmap);

	return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);

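/* Usage sketch (hypothetical caller, same assumed names as the example
 * after async_gen_syndrome): verify a stripe's P/Q, supplying the spare
 * page and scribble buffer the synchronous path requires:
 *
 *	enum sum_check_flags pqres;
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, addr_conv);
 *	tx = async_syndrome_val(blocks, 0, 6, PAGE_SIZE, &pqres, spare,
 *				&submit);
 *	async_tx_quiesce(&tx);
 *	if (pqres & SUM_CHECK_P_RESULT)
 *		pr_warn("P mismatch\n");
 *	if (pqres & SUM_CHECK_Q_RESULT)
 *		pr_warn("Q mismatch\n");
 */
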
static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	__free_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");