GNU Linux-libre 4.4.299-gnu1
drivers/crypto/qat/qat_common/qat_uclo.c
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
#include "icp_qat_hal.h"
#include "icp_qat_fw_loader_handle.h"

#define UWORD_CPYBUF_SIZE 1024
#define INVLD_UWORD 0xffffffffffull
#define PID_MINOR_REV 0xf
#define PID_MAJOR_REV (0xf << 4)

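/*
 * Bind the encapsulated UOF image 'image_num' to accel engine 'ae': record
 * it in the AE's next free slice, take over its assigned-context mask and
 * allocate the region/page bookkeeping for that slice.
 */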
static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
                                 unsigned int ae, unsigned int image_num)
{
        struct icp_qat_uclo_aedata *ae_data;
        struct icp_qat_uclo_encapme *encap_image;
        struct icp_qat_uclo_page *page = NULL;
        struct icp_qat_uclo_aeslice *ae_slice = NULL;

        ae_data = &obj_handle->ae_data[ae];
        encap_image = &obj_handle->ae_uimage[image_num];
        ae_slice = &ae_data->ae_slices[ae_data->slice_num];
        ae_slice->encap_image = encap_image;

        if (encap_image->img_ptr) {
                ae_slice->ctx_mask_assigned =
                                        encap_image->img_ptr->ctx_assigned;
                ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
        } else {
                ae_slice->ctx_mask_assigned = 0;
        }
        ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
        if (!ae_slice->region)
                return -ENOMEM;
        ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
        if (!ae_slice->page)
                goto out_err;
        page = ae_slice->page;
        page->encap_page = encap_image->page;
        ae_slice->page->region = ae_slice->region;
        ae_data->slice_num++;
        return 0;
out_err:
        kfree(ae_slice->region);
        ae_slice->region = NULL;
        return -ENOMEM;
}

static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
{
        unsigned int i;

        if (!ae_data) {
                pr_err("QAT: bad argument, ae_data is NULL\n");
                return -EINVAL;
        }

        for (i = 0; i < ae_data->slice_num; i++) {
                kfree(ae_data->ae_slices[i].region);
                ae_data->ae_slices[i].region = NULL;
                kfree(ae_data->ae_slices[i].page);
                ae_data->ae_slices[i].page = NULL;
        }
        return 0;
}

static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
                                 unsigned int str_offset)
{
        if ((!str_table->table_len) || (str_offset > str_table->table_len))
                return NULL;
        return (char *)(((unsigned long)(str_table->strings)) + str_offset);
}

static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
{
        int maj = hdr->maj_ver & 0xff;
        int min = hdr->min_ver & 0xff;

        if (hdr->file_id != ICP_QAT_UOF_FID) {
                pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
                return -EINVAL;
        }
        if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
                pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
                       maj, min);
                return -EINVAL;
        }
        return 0;
}

static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
                                      unsigned int addr, unsigned int *val,
                                      unsigned int num_in_bytes)
{
        unsigned int outval;
        unsigned char *ptr = (unsigned char *)val;

        while (num_in_bytes) {
                memcpy(&outval, ptr, 4);
                SRAM_WRITE(handle, addr, outval);
                num_in_bytes -= 4;
                ptr += 4;
                addr += 4;
        }
}

static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
                                      unsigned char ae, unsigned int addr,
                                      unsigned int *val,
                                      unsigned int num_in_bytes)
{
        unsigned int outval;
        unsigned char *ptr = (unsigned char *)val;

        addr >>= 0x2; /* convert to uword address */

        while (num_in_bytes) {
                memcpy(&outval, ptr, 4);
                qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
                num_in_bytes -= 4;
                ptr += 4;
        }
}

static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
                                   unsigned char ae,
                                   struct icp_qat_uof_batch_init
                                   *umem_init_header)
{
        struct icp_qat_uof_batch_init *umem_init;

        if (!umem_init_header)
                return;
        umem_init = umem_init_header->next;
        while (umem_init) {
                unsigned int addr, *value, size;

                ae = umem_init->ae;
                addr = umem_init->addr;
                value = umem_init->value;
                size = umem_init->size;
                qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
                umem_init = umem_init->next;
        }
}

static void
qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
                                 struct icp_qat_uof_batch_init **base)
{
        struct icp_qat_uof_batch_init *umem_init;

        umem_init = *base;
        while (umem_init) {
                struct icp_qat_uof_batch_init *pre;

                pre = umem_init;
                umem_init = umem_init->next;
                kfree(pre);
        }
        *base = NULL;
}

static int qat_uclo_parse_num(char *str, unsigned int *num)
{
        char buf[16] = {0};
        unsigned long ae = 0;
        int i;

        strncpy(buf, str, 15);
        for (i = 0; i < 16; i++) {
                if (!isdigit(buf[i])) {
                        buf[i] = '\0';
                        break;
                }
        }
        if ((kstrtoul(buf, 10, &ae)))
                return -EFAULT;

        *num = (unsigned int)ae;
        return 0;
}

static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
                                     struct icp_qat_uof_initmem *init_mem,
                                     unsigned int size_range, unsigned int *ae)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        char *str;

        if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
                pr_err("QAT: initmem is out of range\n");
                return -EINVAL;
        }
        if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
                pr_err("QAT: Memory scope for init_mem error\n");
                return -EINVAL;
        }
        str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
        if (!str) {
                pr_err("QAT: AE name assigned in UOF init table is NULL\n");
                return -EINVAL;
        }
        if (qat_uclo_parse_num(str, ae)) {
                pr_err("QAT: Parse num for AE number failed\n");
                return -EINVAL;
        }
        if (*ae >= ICP_QAT_UCLO_MAX_AE) {
                pr_err("QAT: ae %d out of range\n", *ae);
                return -EINVAL;
        }
        return 0;
}

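/*
 * Expand one UOF initmem record into a linked list of 4-byte batch-init
 * writes, one node per memvar attribute, appended to *init_tab_base; the
 * list head is allocated here on first use and its 'size' field tracks
 * the number of instructions the batch will need.
 */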
static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
                                           *handle, struct icp_qat_uof_initmem
                                           *init_mem, unsigned int ae,
                                           struct icp_qat_uof_batch_init
                                           **init_tab_base)
{
        struct icp_qat_uof_batch_init *init_header, *tail;
        struct icp_qat_uof_batch_init *mem_init, *tail_old;
        struct icp_qat_uof_memvar_attr *mem_val_attr;
        unsigned int i, flag = 0;

        mem_val_attr =
                (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
                sizeof(struct icp_qat_uof_initmem));

        init_header = *init_tab_base;
        if (!init_header) {
                init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
                if (!init_header)
                        return -ENOMEM;
                init_header->size = 1;
                *init_tab_base = init_header;
                flag = 1;
        }
        tail_old = init_header;
        while (tail_old->next)
                tail_old = tail_old->next;
        tail = tail_old;
        for (i = 0; i < init_mem->val_attr_num; i++) {
                mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
                if (!mem_init)
                        goto out_err;
                mem_init->ae = ae;
                mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
                mem_init->value = &mem_val_attr->value;
                mem_init->size = 4;
                mem_init->next = NULL;
                tail->next = mem_init;
                tail = mem_init;
                init_header->size += qat_hal_get_ins_num();
                mem_val_attr++;
        }
        return 0;
out_err:
        /* Do not free the list head unless we allocated it. */
        tail_old = tail_old->next;
        if (flag) {
                kfree(*init_tab_base);
                *init_tab_base = NULL;
        }

        while (tail_old) {
                mem_init = tail_old->next;
                kfree(tail_old);
                tail_old = mem_init;
        }
        return -ENOMEM;
}

static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
                                  struct icp_qat_uof_initmem *init_mem)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        unsigned int ae;

        if (qat_uclo_fetch_initmem_ae(handle, init_mem,
                                      ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
                return -EINVAL;
        if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
                                            &obj_handle->lm_init_tab[ae]))
                return -EINVAL;
        return 0;
}

static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
                                  struct icp_qat_uof_initmem *init_mem)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        unsigned int ae, ustore_size, uaddr, i;

        ustore_size = obj_handle->ustore_phy_size;
        if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
                return -EINVAL;
        if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
                                            &obj_handle->umem_init_tab[ae]))
                return -EINVAL;
        /* set the highest ustore address referenced */
        uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
        for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
                if (obj_handle->ae_data[ae].ae_slices[i].
                    encap_image->uwords_num < uaddr)
                        obj_handle->ae_data[ae].ae_slices[i].
                        encap_image->uwords_num = uaddr;
        }
        return 0;
}

static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
                                   struct icp_qat_uof_initmem *init_mem)
{
        switch (init_mem->region) {
        case ICP_QAT_UOF_LMEM_REGION:
                if (qat_uclo_init_lmem_seg(handle, init_mem))
                        return -EINVAL;
                break;
        case ICP_QAT_UOF_UMEM_REGION:
                if (qat_uclo_init_umem_seg(handle, init_mem))
                        return -EINVAL;
                break;
        default:
                pr_err("QAT: initmem region error. region type=0x%x\n",
                       init_mem->region);
                return -EINVAL;
        }
        return 0;
}

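/*
 * Pre-fill an AE's control store with the image's fill pattern: the words
 * below the page's start address and everything above the page's last word,
 * up to the AE's effective ustore size, are written with the pattern.
 */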
static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
                                struct icp_qat_uclo_encapme *image)
{
        unsigned int i;
        struct icp_qat_uclo_encap_page *page;
        struct icp_qat_uof_image *uof_image;
        unsigned char ae;
        unsigned int ustore_size;
        unsigned int patt_pos;
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        uint64_t *fill_data;

        uof_image = image->img_ptr;
        fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
                            GFP_KERNEL);
        if (!fill_data)
                return -ENOMEM;
        for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
                memcpy(&fill_data[i], &uof_image->fill_pattern,
                       sizeof(uint64_t));
        page = image->page;

        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
                        continue;
                ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
                patt_pos = page->beg_addr_p + page->micro_words_num;

                qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
                                  page->beg_addr_p, &fill_data[0]);
                qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
                                  ustore_size - patt_pos + 1,
                                  &fill_data[page->beg_addr_p]);
        }
        kfree(fill_data);
        return 0;
}

static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
        int i, ae;
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;

        for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
                if (initmem->num_in_bytes) {
                        if (qat_uclo_init_ae_memory(handle, initmem))
                                return -EINVAL;
                }
                initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
                        (unsigned long)initmem +
                        sizeof(struct icp_qat_uof_initmem)) +
                        (sizeof(struct icp_qat_uof_memvar_attr) *
                        initmem->val_attr_num));
        }
        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                if (qat_hal_batch_wr_lm(handle, ae,
                                        obj_handle->lm_init_tab[ae])) {
                        pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
                        return -EINVAL;
                }
                qat_uclo_cleanup_batch_init_list(handle,
                                                 &obj_handle->lm_init_tab[ae]);
                qat_uclo_batch_wr_umem(handle, ae,
                                       obj_handle->umem_init_tab[ae]);
                qat_uclo_cleanup_batch_init_list(handle,
                                                 &obj_handle->
                                                 umem_init_tab[ae]);
        }
        return 0;
}

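/*
 * Scan the chunk headers that follow an object header for the next chunk
 * whose id matches 'chunk_id', starting after the position 'cur' (pass
 * NULL to start from the beginning).
 */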
static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
                                 char *chunk_id, void *cur)
{
        int i;
        struct icp_qat_uof_chunkhdr *chunk_hdr =
            (struct icp_qat_uof_chunkhdr *)
            ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr));

        for (i = 0; i < obj_hdr->num_chunks; i++) {
                if ((cur < (void *)&chunk_hdr[i]) &&
                    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
                             ICP_QAT_UOF_OBJID_LEN)) {
                        return &chunk_hdr[i];
                }
        }
        return NULL;
}

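/*
 * CRC-16 (polynomial 0x1021) over a byte stream, used to verify UOF file
 * chunks: qat_uclo_calc_checksum folds one byte into the running value and
 * qat_uclo_calc_str_checksum runs it over 'num' bytes starting at 'ptr'.
 */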
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
        int i;
        unsigned int topbit = 1 << 0xF;
        unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);

        reg ^= inbyte << 0x8;
        for (i = 0; i < 0x8; i++) {
                if (reg & topbit)
                        reg = (reg << 1) ^ 0x1021;
                else
                        reg <<= 1;
        }
        return reg & 0xFFFF;
}

static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
        unsigned int chksum = 0;

        if (ptr)
                while (num--)
                        chksum = qat_uclo_calc_checksum(chksum, *ptr++);
        return chksum;
}

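/*
 * Locate the file chunk named 'chunk_id' in the UOF buffer, verify its
 * checksum and wrap it in a freshly allocated icp_qat_uclo_objhdr; returns
 * NULL if the chunk is missing, corrupt or the allocation fails.
 */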
static struct icp_qat_uclo_objhdr *
qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
                   char *chunk_id)
{
        struct icp_qat_uof_filechunkhdr *file_chunk;
        struct icp_qat_uclo_objhdr *obj_hdr;
        char *chunk;
        int i;

        file_chunk = (struct icp_qat_uof_filechunkhdr *)
                (buf + sizeof(struct icp_qat_uof_filehdr));
        for (i = 0; i < file_hdr->num_chunks; i++) {
                if (!strncmp(file_chunk->chunk_id, chunk_id,
                             ICP_QAT_UOF_OBJID_LEN)) {
                        chunk = buf + file_chunk->offset;
                        if (file_chunk->checksum != qat_uclo_calc_str_checksum(
                                chunk, file_chunk->size))
                                break;
                        obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
                        if (!obj_hdr)
                                break;
                        obj_hdr->file_buff = chunk;
                        obj_hdr->checksum = file_chunk->checksum;
                        obj_hdr->size = file_chunk->size;
                        return obj_hdr;
                }
                file_chunk++;
        }
        return NULL;
}

static int
qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
                            struct icp_qat_uof_image *image)
{
        struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
        struct icp_qat_uof_objtable *neigh_reg_tab;
        struct icp_qat_uof_code_page *code_page;

        code_page = (struct icp_qat_uof_code_page *)
                        ((char *)image + sizeof(struct icp_qat_uof_image));
        uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
                     code_page->uc_var_tab_offset);
        imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
                      code_page->imp_var_tab_offset);
        imp_expr_tab = (struct icp_qat_uof_objtable *)
                       (encap_uof_obj->beg_uof +
                       code_page->imp_expr_tab_offset);
        if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
            imp_expr_tab->entry_num) {
                pr_err("QAT: UOF can't contain imported variable to be parsed\n");
                return -EINVAL;
        }
        neigh_reg_tab = (struct icp_qat_uof_objtable *)
                        (encap_uof_obj->beg_uof +
                        code_page->neigh_reg_tab_offset);
        if (neigh_reg_tab->entry_num) {
                pr_err("QAT: UOF can't contain shared control store feature\n");
                return -EINVAL;
        }
        if (image->numpages > 1) {
                pr_err("QAT: UOF can't contain multiple pages\n");
                return -EINVAL;
        }
        if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
                pr_err("QAT: UOF can't use shared control store feature\n");
                return -EFAULT;
        }
        if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
                pr_err("QAT: UOF can't use reloadable feature\n");
                return -EFAULT;
        }
        return 0;
}

static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
                                     *encap_uof_obj,
                                     struct icp_qat_uof_image *img,
                                     struct icp_qat_uclo_encap_page *page)
{
        struct icp_qat_uof_code_page *code_page;
        struct icp_qat_uof_code_area *code_area;
        struct icp_qat_uof_objtable *uword_block_tab;
        struct icp_qat_uof_uword_block *uwblock;
        int i;

        code_page = (struct icp_qat_uof_code_page *)
                        ((char *)img + sizeof(struct icp_qat_uof_image));
        page->def_page = code_page->def_page;
        page->page_region = code_page->page_region;
        page->beg_addr_v = code_page->beg_addr_v;
        page->beg_addr_p = code_page->beg_addr_p;
        code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
                                                code_page->code_area_offset);
        page->micro_words_num = code_area->micro_words_num;
        uword_block_tab = (struct icp_qat_uof_objtable *)
                          (encap_uof_obj->beg_uof +
                          code_area->uword_block_tab);
        page->uwblock_num = uword_block_tab->entry_num;
        uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
                        sizeof(struct icp_qat_uof_objtable));
        page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
        for (i = 0; i < uword_block_tab->entry_num; i++)
                page->uwblock[i].micro_words =
                (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}

static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
                               struct icp_qat_uclo_encapme *ae_uimage,
                               int max_image)
{
        int i, j;
        struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
        struct icp_qat_uof_image *image;
        struct icp_qat_uof_objtable *ae_regtab;
        struct icp_qat_uof_objtable *init_reg_sym_tab;
        struct icp_qat_uof_objtable *sbreak_tab;
        struct icp_qat_uof_encap_obj *encap_uof_obj =
                                        &obj_handle->encap_uof_obj;

        for (j = 0; j < max_image; j++) {
                chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
                                                ICP_QAT_UOF_IMAG, chunk_hdr);
                if (!chunk_hdr)
                        break;
                image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
                                                     chunk_hdr->offset);
                ae_regtab = (struct icp_qat_uof_objtable *)
                           (image->reg_tab_offset +
                           obj_handle->obj_hdr->file_buff);
                ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
                ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
                        (((char *)ae_regtab) +
                        sizeof(struct icp_qat_uof_objtable));
                init_reg_sym_tab = (struct icp_qat_uof_objtable *)
                                   (image->init_reg_sym_tab +
                                   obj_handle->obj_hdr->file_buff);
                ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
                ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
                        (((char *)init_reg_sym_tab) +
                        sizeof(struct icp_qat_uof_objtable));
                sbreak_tab = (struct icp_qat_uof_objtable *)
                        (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
                ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
                ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
                                      (((char *)sbreak_tab) +
                                      sizeof(struct icp_qat_uof_objtable));
                ae_uimage[j].img_ptr = image;
                if (qat_uclo_check_image_compat(encap_uof_obj, image))
                        goto out_err;
                ae_uimage[j].page =
                        kzalloc(sizeof(struct icp_qat_uclo_encap_page),
                                GFP_KERNEL);
                if (!ae_uimage[j].page)
                        goto out_err;
                qat_uclo_map_image_page(encap_uof_obj, image,
                                        ae_uimage[j].page);
        }
        return j;
out_err:
        for (i = 0; i < j; i++)
                kfree(ae_uimage[i].page);
        return 0;
}

static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
{
        int i, ae;
        int mflag = 0;
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

        for (ae = 0; ae <= max_ae; ae++) {
                if (!test_bit(ae,
                              (unsigned long *)&handle->hal_handle->ae_mask))
                        continue;
                for (i = 0; i < obj_handle->uimage_num; i++) {
                        if (!test_bit(ae, (unsigned long *)
                        &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
                                continue;
                        mflag = 1;
                        if (qat_uclo_init_ae_data(obj_handle, ae, i))
                                return -EINVAL;
                }
        }
        if (!mflag) {
                pr_err("QAT: uimage uses AE not set\n");
                return -EINVAL;
        }
        return 0;
}

static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
                       char *tab_name, struct icp_qat_uof_strtable *str_table)
{
        struct icp_qat_uof_chunkhdr *chunk_hdr;

        chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
                                        obj_hdr->file_buff, tab_name, NULL);
        if (chunk_hdr) {
                int hdr_size;

                memcpy(&str_table->table_len, obj_hdr->file_buff +
                       chunk_hdr->offset, sizeof(str_table->table_len));
                hdr_size = (char *)&str_table->strings - (char *)str_table;
                str_table->strings = (unsigned long)obj_hdr->file_buff +
                                        chunk_hdr->offset + hdr_size;
                return str_table;
        }
        return NULL;
}

static void
qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
                           struct icp_qat_uclo_init_mem_table *init_mem_tab)
{
        struct icp_qat_uof_chunkhdr *chunk_hdr;

        chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
                                        ICP_QAT_UOF_IMEM, NULL);
        if (chunk_hdr) {
                memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
                        chunk_hdr->offset, sizeof(unsigned int));
                init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
                (encap_uof_obj->beg_uof + chunk_hdr->offset +
                sizeof(unsigned int));
        }
}

static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
{
        unsigned int maj_ver, prod_type = obj_handle->prod_type;

        if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) {
                pr_err("QAT: UOF type 0x%x doesn't match the platform 0x%x\n",
                       obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type);
                return -EINVAL;
        }
        maj_ver = obj_handle->prod_rev & 0xff;
        if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
            (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
                pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
                return -EINVAL;
        }
        return 0;
}

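/*
 * Program one initial register value, dispatching on the register type;
 * absolute types fall through to their relative counterparts with the
 * context mask cleared, since absolute registers are not per-context.
 */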
static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
                             unsigned char ae, unsigned char ctx_mask,
                             enum icp_qat_uof_regtype reg_type,
                             unsigned short reg_addr, unsigned int value)
{
        switch (reg_type) {
        case ICP_GPA_ABS:
        case ICP_GPB_ABS:
                ctx_mask = 0;
                /* fall through */
        case ICP_GPA_REL:
        case ICP_GPB_REL:
                return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
                                        reg_addr, value);
        case ICP_SR_ABS:
        case ICP_DR_ABS:
        case ICP_SR_RD_ABS:
        case ICP_DR_RD_ABS:
                ctx_mask = 0;
                /* fall through */
        case ICP_SR_REL:
        case ICP_DR_REL:
        case ICP_SR_RD_REL:
        case ICP_DR_RD_REL:
                return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
                                            reg_addr, value);
        case ICP_SR_WR_ABS:
        case ICP_DR_WR_ABS:
                ctx_mask = 0;
                /* fall through */
        case ICP_SR_WR_REL:
        case ICP_DR_WR_REL:
                return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
                                            reg_addr, value);
        case ICP_NEIGH_REL:
                return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
        default:
                pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type);
                return -EFAULT;
        }
        return 0;
}

static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
                                 unsigned int ae,
                                 struct icp_qat_uclo_encapme *encap_ae)
{
        unsigned int i;
        unsigned char ctx_mask;
        struct icp_qat_uof_init_regsym *init_regsym;

        if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
            ICP_QAT_UCLO_MAX_CTX)
                ctx_mask = 0xff;
        else
                ctx_mask = 0x55;

        for (i = 0; i < encap_ae->init_regsym_num; i++) {
                unsigned int exp_res;

                init_regsym = &encap_ae->init_regsym[i];
                exp_res = init_regsym->value;
                switch (init_regsym->init_type) {
                case ICP_QAT_UOF_INIT_REG:
                        qat_uclo_init_reg(handle, ae, ctx_mask,
                                          (enum icp_qat_uof_regtype)
                                          init_regsym->reg_type,
                                          (unsigned short)init_regsym->reg_addr,
                                          exp_res);
                        break;
                case ICP_QAT_UOF_INIT_REG_CTX:
                        /* check if ctx is appropriate for the ctxMode */
                        if (!((1 << init_regsym->ctx) & ctx_mask)) {
                                pr_err("QAT: invalid ctx num = 0x%x\n",
                                       init_regsym->ctx);
                                return -EINVAL;
                        }
                        qat_uclo_init_reg(handle, ae,
                                          (unsigned char)
                                          (1 << init_regsym->ctx),
                                          (enum icp_qat_uof_regtype)
                                          init_regsym->reg_type,
                                          (unsigned short)init_regsym->reg_addr,
                                          exp_res);
                        break;
                case ICP_QAT_UOF_INIT_EXPR:
                        pr_err("QAT: INIT_EXPR feature not supported\n");
                        return -EINVAL;
                case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
                        pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
                        return -EINVAL;
                default:
                        break;
                }
        }
        return 0;
}

static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        unsigned int s, ae;

        if (obj_handle->global_inited)
                return 0;
        if (obj_handle->init_mem_tab.entry_num) {
                if (qat_uclo_init_memory(handle)) {
                        pr_err("QAT: initialize memory failed\n");
                        return -EINVAL;
                }
        }
        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
                        if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
                                continue;
                        if (qat_uclo_init_reg_sym(handle, ae,
                                                  obj_handle->ae_data[ae].
                                                  ae_slices[s].encap_image))
                                return -EINVAL;
                }
        }
        obj_handle->global_inited = 1;
        return 0;
}

static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
{
        unsigned char ae, nn_mode, s;
        struct icp_qat_uof_image *uof_image;
        struct icp_qat_uclo_aedata *ae_data;
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                if (!test_bit(ae,
                              (unsigned long *)&handle->hal_handle->ae_mask))
                        continue;
                ae_data = &obj_handle->ae_data[ae];
                for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
                                      ICP_QAT_UCLO_MAX_CTX); s++) {
                        if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
                                continue;
                        uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
                        if (qat_hal_set_ae_ctx_mode(handle, ae,
                                                    (char)ICP_QAT_CTX_MODE
                                                    (uof_image->ae_mode))) {
                                pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
                                return -EFAULT;
                        }
                        nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
                        if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
                                pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
                                return -EFAULT;
                        }
                        if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
                                                   (char)ICP_QAT_LOC_MEM0_MODE
                                                   (uof_image->ae_mode))) {
                                pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
                                return -EFAULT;
                        }
                        if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
                                                   (char)ICP_QAT_LOC_MEM1_MODE
                                                   (uof_image->ae_mode))) {
                                pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
                                return -EFAULT;
                        }
                }
        }
        return 0;
}

static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        struct icp_qat_uclo_encapme *image;
        int a;

        for (a = 0; a < obj_handle->uimage_num; a++) {
                image = &obj_handle->ae_uimage[a];
                image->uwords_num = image->page->beg_addr_p +
                                        image->page->micro_words_num;
        }
}

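/*
 * Parse the mapped UOF object: check platform and version compatibility,
 * map the string table, the per-AE microcode images and the initmem table,
 * then bind images to accel engines and program each AE's mode bits.
 */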
static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        unsigned int ae;

        obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
                                        GFP_KERNEL);
        if (!obj_handle->uword_buf)
                return -ENOMEM;
        obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
        obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
                                             obj_handle->obj_hdr->file_buff;
        obj_handle->uword_in_bytes = 6;
        obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
        obj_handle->prod_rev = PID_MAJOR_REV |
                        (PID_MINOR_REV & handle->hal_handle->revision_id);
        if (qat_uclo_check_uof_compat(obj_handle)) {
                pr_err("QAT: UOF incompatible\n");
                return -EINVAL;
        }
        obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
        if (!obj_handle->obj_hdr->file_buff ||
            !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
                                    &obj_handle->str_table)) {
                pr_err("QAT: UOF doesn't have effective images\n");
                goto out_err;
        }
        obj_handle->uimage_num =
                qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
                                    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
        if (!obj_handle->uimage_num)
                goto out_err;
        if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
                pr_err("QAT: Bad object\n");
                goto out_check_uof_aemask_err;
        }
        qat_uclo_init_uword_num(handle);
        qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
                                   &obj_handle->init_mem_tab);
        if (qat_uclo_set_ae_mode(handle))
                goto out_check_uof_aemask_err;
        return 0;
out_check_uof_aemask_err:
        for (ae = 0; ae < obj_handle->uimage_num; ae++)
                kfree(obj_handle->ae_uimage[ae].page);
out_err:
        kfree(obj_handle->uword_buf);
        return -EFAULT;
}

void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
                        void *addr_ptr, int mem_size)
{
        qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4));
}

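/*
 * Entry point for loading a UOF firmware blob: duplicate the caller's
 * buffer, validate the file header, map the ICP_QAT_UOF_OBJS chunk and
 * hand the resulting object handle to qat_uclo_parse_uof_obj().
 */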
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
                         void *addr_ptr, int mem_size)
{
        struct icp_qat_uof_filehdr *filehdr;
        struct icp_qat_uclo_objhandle *objhdl;

        BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
                     (sizeof(handle->hal_handle->ae_mask) * 8));

        if (!handle || !addr_ptr || mem_size < 24)
                return -EINVAL;
        objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
        if (!objhdl)
                return -ENOMEM;
        objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
        if (!objhdl->obj_buf)
                goto out_objbuf_err;
        filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
        if (qat_uclo_check_format(filehdr))
                goto out_objhdr_err;
        objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
                                             ICP_QAT_UOF_OBJS);
        if (!objhdl->obj_hdr) {
                pr_err("QAT: object file chunk is null\n");
                goto out_objhdr_err;
        }
        handle->obj_handle = objhdl;
        if (qat_uclo_parse_uof_obj(handle))
                goto out_overlay_obj_err;
        return 0;

out_overlay_obj_err:
        handle->obj_handle = NULL;
        kfree(objhdl->obj_hdr);
out_objhdr_err:
        kfree(objhdl->obj_buf);
out_objbuf_err:
        kfree(objhdl);
        return -ENOMEM;
}

void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        unsigned int a;

        if (!obj_handle)
                return;

        kfree(obj_handle->uword_buf);
        for (a = 0; a < obj_handle->uimage_num; a++)
                kfree(obj_handle->ae_uimage[a].page);

        for (a = 0; a < handle->hal_handle->ae_max_num; a++)
                qat_uclo_free_ae_data(&obj_handle->ae_data[a]);

        kfree(obj_handle->obj_hdr);
        kfree(obj_handle->obj_buf);
        kfree(obj_handle);
        handle->obj_handle = NULL;
}

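/*
 * Fetch the microword at relative address 'raddr' from the page's uword
 * blocks; if there is no encapsulated page, or the stored word is
 * INVLD_UWORD, the fill pattern is used instead.
 */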
static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
                                 struct icp_qat_uclo_encap_page *encap_page,
                                 uint64_t *uword, unsigned int addr_p,
                                 unsigned int raddr, uint64_t fill)
{
        uint64_t uwrd = 0;
        unsigned int i;

        if (!encap_page) {
                *uword = fill;
                return;
        }
        for (i = 0; i < encap_page->uwblock_num; i++) {
                if (raddr >= encap_page->uwblock[i].start_addr &&
                    raddr <= encap_page->uwblock[i].start_addr +
                    encap_page->uwblock[i].words_num - 1) {
                        raddr -= encap_page->uwblock[i].start_addr;
                        raddr *= obj_handle->uword_in_bytes;
                        memcpy(&uwrd, (void *)(((unsigned long)
                               encap_page->uwblock[i].micro_words) + raddr),
                               obj_handle->uword_in_bytes);
                        uwrd = uwrd & 0xbffffffffffull;
                }
        }
        *uword = uwrd;
        if (*uword == INVLD_UWORD)
                *uword = fill;
}

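/*
 * Copy one encapsulated page into an AE's control store in chunks of at
 * most UWORD_CPYBUF_SIZE microwords, filling gaps in the page with the
 * image's fill pattern as the copy buffer is assembled.
 */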
static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
                                        struct icp_qat_uclo_encap_page
                                        *encap_page, unsigned int ae)
{
        unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        uint64_t fill_pat;

        /* load the page starting at appropriate ustore address */
        /* get fill-pattern from an image -- they are all the same */
        memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
               sizeof(uint64_t));
        uw_physical_addr = encap_page->beg_addr_p;
        uw_relative_addr = 0;
        words_num = encap_page->micro_words_num;
        while (words_num) {
                if (words_num < UWORD_CPYBUF_SIZE)
                        cpylen = words_num;
                else
                        cpylen = UWORD_CPYBUF_SIZE;

                /* load the buffer */
                for (i = 0; i < cpylen; i++)
                        qat_uclo_fill_uwords(obj_handle, encap_page,
                                             &obj_handle->uword_buf[i],
                                             uw_physical_addr + i,
                                             uw_relative_addr + i, fill_pat);

                /* copy the buffer to ustore */
                qat_hal_wr_uwords(handle, (unsigned char)ae,
                                  uw_physical_addr, cpylen,
                                  obj_handle->uword_buf);

                uw_physical_addr += cpylen;
                uw_relative_addr += cpylen;
                words_num -= cpylen;
        }
}

static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
                                    struct icp_qat_uof_image *image)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        unsigned int ctx_mask, s;
        struct icp_qat_uclo_page *page;
        unsigned char ae;
        int ctx;

        if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
                ctx_mask = 0xff;
        else
                ctx_mask = 0x55;
        /* load the default page and set assigned CTX PC
         * to the entrypoint address */
        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
                        continue;
                /* find the slice to which this image is assigned */
                for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
                        if (image->ctx_assigned & obj_handle->ae_data[ae].
                            ae_slices[s].ctx_mask_assigned)
                                break;
                }
                if (s >= obj_handle->ae_data[ae].slice_num)
                        continue;
                page = obj_handle->ae_data[ae].ae_slices[s].page;
                if (!page->encap_page->def_page)
                        continue;
                qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);

                page = obj_handle->ae_data[ae].ae_slices[s].page;
                for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
                        obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
                                        (ctx_mask & (1 << ctx)) ? page : NULL;
                qat_hal_set_live_ctx(handle, (unsigned char)ae,
                                     image->ctx_assigned);
                qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
                               image->entry_address);
        }
}

int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        unsigned int i;

        if (qat_uclo_init_globals(handle))
                return -EINVAL;
        for (i = 0; i < obj_handle->uimage_num; i++) {
                if (!obj_handle->ae_uimage[i].img_ptr)
                        return -EINVAL;
                if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
                        return -EINVAL;
                qat_uclo_wr_uimage_page(handle,
                                        obj_handle->ae_uimage[i].img_ptr);
        }
        return 0;
}