1 /*
2   This file is provided under a dual BSD/GPLv2 license.  When using or
3   redistributing this file, you may do so under either license.
4
5   GPL LICENSE SUMMARY
6   Copyright(c) 2014 Intel Corporation.
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of version 2 of the GNU General Public License as
9   published by the Free Software Foundation.
10
11   This program is distributed in the hope that it will be useful, but
12   WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14   General Public License for more details.
15
16   Contact Information:
17   qat-linux@intel.com
18
19   BSD LICENSE
20   Copyright(c) 2014 Intel Corporation.
21   Redistribution and use in source and binary forms, with or without
22   modification, are permitted provided that the following conditions
23   are met:
24
25     * Redistributions of source code must retain the above copyright
26       notice, this list of conditions and the following disclaimer.
27     * Redistributions in binary form must reproduce the above copyright
28       notice, this list of conditions and the following disclaimer in
29       the documentation and/or other materials provided with the
30       distribution.
31     * Neither the name of Intel Corporation nor the names of its
32       contributors may be used to endorse or promote products derived
33       from this software without specific prior written permission.
34
35   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/slab.h>
48 #include <linux/ctype.h>
49 #include <linux/kernel.h>
50 #include <linux/delay.h>
51 #include "adf_accel_devices.h"
52 #include "adf_common_drv.h"
53 #include "icp_qat_uclo.h"
54 #include "icp_qat_hal.h"
55 #include "icp_qat_fw_loader_handle.h"
56
57 #define UWORD_CPYBUF_SIZE 1024
58 #define INVLD_UWORD 0xffffffffffull
59 #define PID_MINOR_REV 0xf
60 #define PID_MAJOR_REV (0xf << 4)
61
62 static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
63                                  unsigned int ae, unsigned int image_num)
64 {
65         struct icp_qat_uclo_aedata *ae_data;
66         struct icp_qat_uclo_encapme *encap_image;
67         struct icp_qat_uclo_page *page = NULL;
68         struct icp_qat_uclo_aeslice *ae_slice = NULL;
69
70         ae_data = &obj_handle->ae_data[ae];
71         encap_image = &obj_handle->ae_uimage[image_num];
72         ae_slice = &ae_data->ae_slices[ae_data->slice_num];
73         ae_slice->encap_image = encap_image;
74
75         if (encap_image->img_ptr) {
76                 ae_slice->ctx_mask_assigned =
77                                         encap_image->img_ptr->ctx_assigned;
78                 ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
79         } else {
80                 ae_slice->ctx_mask_assigned = 0;
81         }
82         ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
83         if (!ae_slice->region)
84                 return -ENOMEM;
85         ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
86         if (!ae_slice->page)
87                 goto out_err;
88         page = ae_slice->page;
89         page->encap_page = encap_image->page;
90         ae_slice->page->region = ae_slice->region;
91         ae_data->slice_num++;
92         return 0;
93 out_err:
94         kfree(ae_slice->region);
95         ae_slice->region = NULL;
96         return -ENOMEM;
97 }
98
99 static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
100 {
101         unsigned int i;
102
103         if (!ae_data) {
104                 pr_err("QAT: bad argument, ae_data is NULL\n");
105                 return -EINVAL;
106         }
107
108         for (i = 0; i < ae_data->slice_num; i++) {
109                 kfree(ae_data->ae_slices[i].region);
110                 ae_data->ae_slices[i].region = NULL;
111                 kfree(ae_data->ae_slices[i].page);
112                 ae_data->ae_slices[i].page = NULL;
113         }
114         return 0;
115 }
116
117 static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
118                                  unsigned int str_offset)
119 {
120         if ((!str_table->table_len) || (str_offset > str_table->table_len))
121                 return NULL;
122         return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
123 }
124
125 static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
126 {
127         int maj = hdr->maj_ver & 0xff;
128         int min = hdr->min_ver & 0xff;
129
130         if (hdr->file_id != ICP_QAT_UOF_FID) {
131                 pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
132                 return -EINVAL;
133         }
134         if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
135                 pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
136                        maj, min);
137                 return -EINVAL;
138         }
139         return 0;
140 }
141
142 static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
143 {
144         int maj = suof_hdr->maj_ver & 0xff;
145         int min = suof_hdr->min_ver & 0xff;
146
147         if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
148                 pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
149                 return -EINVAL;
150         }
151         if (suof_hdr->fw_type != 0) {
152                 pr_err("QAT: unsupported firmware type\n");
153                 return -EINVAL;
154         }
155         if (suof_hdr->num_chunks <= 0x1) {
156                 pr_err("QAT: SUOF chunk count is incorrect\n");
157                 return -EINVAL;
158         }
159         if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
160                 pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
161                        maj, min);
162                 return -EINVAL;
163         }
164         return 0;
165 }
166
167 static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
168                                       unsigned int addr, unsigned int *val,
169                                       unsigned int num_in_bytes)
170 {
171         unsigned int outval;
172         unsigned char *ptr = (unsigned char *)val;
173
174         while (num_in_bytes) {
175                 memcpy(&outval, ptr, 4);
176                 SRAM_WRITE(handle, addr, outval);
177                 num_in_bytes -= 4;
178                 ptr += 4;
179                 addr += 4;
180         }
181 }
182
183 static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
184                                       unsigned char ae, unsigned int addr,
185                                       unsigned int *val,
186                                       unsigned int num_in_bytes)
187 {
188         unsigned int outval;
189         unsigned char *ptr = (unsigned char *)val;
190
191         addr >>= 0x2; /* convert to uword address */
192
193         while (num_in_bytes) {
194                 memcpy(&outval, ptr, 4);
195                 qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
196                 num_in_bytes -= 4;
197                 ptr += 4;
198         }
199 }
200
201 static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
202                                    unsigned char ae,
203                                    struct icp_qat_uof_batch_init
204                                    *umem_init_header)
205 {
206         struct icp_qat_uof_batch_init *umem_init;
207
208         if (!umem_init_header)
209                 return;
210         umem_init = umem_init_header->next;
211         while (umem_init) {
212                 unsigned int addr, *value, size;
213
214                 ae = umem_init->ae;
215                 addr = umem_init->addr;
216                 value = umem_init->value;
217                 size = umem_init->size;
218                 qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
219                 umem_init = umem_init->next;
220         }
221 }
222
223 static void
224 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
225                                  struct icp_qat_uof_batch_init **base)
226 {
227         struct icp_qat_uof_batch_init *umem_init;
228
229         umem_init = *base;
230         while (umem_init) {
231                 struct icp_qat_uof_batch_init *pre;
232
233                 pre = umem_init;
234                 umem_init = umem_init->next;
235                 kfree(pre);
236         }
237         *base = NULL;
238 }
239
240 static int qat_uclo_parse_num(char *str, unsigned int *num)
241 {
242         char buf[16] = {0};
243         unsigned long ae = 0;
244         int i;
245
246         strncpy(buf, str, 15);
247         for (i = 0; i < 16; i++) {
248                 if (!isdigit(buf[i])) {
249                         buf[i] = '\0';
250                         break;
251                 }
252         }
253         if ((kstrtoul(buf, 10, &ae)))
254                 return -EFAULT;
255
256         *num = (unsigned int)ae;
257         return 0;
258 }
259
260 static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
261                                      struct icp_qat_uof_initmem *init_mem,
262                                      unsigned int size_range, unsigned int *ae)
263 {
264         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
265         char *str;
266
267         if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
268                 pr_err("QAT: initmem is out of range\n");
269                 return -EINVAL;
270         }
271         if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
272                 pr_err("QAT: invalid memory scope for init_mem\n");
273                 return -EINVAL;
274         }
275         str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
276         if (!str) {
277                 pr_err("QAT: AE name assigned in UOF init table is NULL\n");
278                 return -EINVAL;
279         }
280         if (qat_uclo_parse_num(str, ae)) {
281                 pr_err("QAT: Parse num for AE number failed\n");
282                 return -EINVAL;
283         }
284         if (*ae >= ICP_QAT_UCLO_MAX_AE) {
285                 pr_err("QAT: ae %d out of range\n", *ae);
286                 return -EINVAL;
287         }
288         return 0;
289 }
290
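/*
 * Append one 4-byte batch-init entry per memvar attribute of this initmem
 * record to the per-AE list at *init_tab_base, allocating the list head on
 * first use; the head's 'size' field accumulates the number of instructions
 * (qat_hal_get_ins_num() per entry) needed to perform the writes.
 */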
291 static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
292                                            *handle, struct icp_qat_uof_initmem
293                                            *init_mem, unsigned int ae,
294                                            struct icp_qat_uof_batch_init
295                                            **init_tab_base)
296 {
297         struct icp_qat_uof_batch_init *init_header, *tail;
298         struct icp_qat_uof_batch_init *mem_init, *tail_old;
299         struct icp_qat_uof_memvar_attr *mem_val_attr;
300         unsigned int i, flag = 0;
301
302         mem_val_attr =
303                 (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
304                 sizeof(struct icp_qat_uof_initmem));
305
306         init_header = *init_tab_base;
307         if (!init_header) {
308                 init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
309                 if (!init_header)
310                         return -ENOMEM;
311                 init_header->size = 1;
312                 *init_tab_base = init_header;
313                 flag = 1;
314         }
315         tail_old = init_header;
316         while (tail_old->next)
317                 tail_old = tail_old->next;
318         tail = tail_old;
319         for (i = 0; i < init_mem->val_attr_num; i++) {
320                 mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
321                 if (!mem_init)
322                         goto out_err;
323                 mem_init->ae = ae;
324                 mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
325                 mem_init->value = &mem_val_attr->value;
326                 mem_init->size = 4;
327                 mem_init->next = NULL;
328                 tail->next = mem_init;
329                 tail = mem_init;
330                 init_header->size += qat_hal_get_ins_num();
331                 mem_val_attr++;
332         }
333         return 0;
334 out_err:
335         /* Do not free the list head unless we allocated it. */
336         tail_old = tail_old->next;
337         if (flag) {
338                 kfree(*init_tab_base);
339                 *init_tab_base = NULL;
340         }
341
342         while (tail_old) {
343                 mem_init = tail_old->next;
344                 kfree(tail_old);
345                 tail_old = mem_init;
346         }
347         return -ENOMEM;
348 }
349
350 static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
351                                   struct icp_qat_uof_initmem *init_mem)
352 {
353         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
354         unsigned int ae;
355
356         if (qat_uclo_fetch_initmem_ae(handle, init_mem,
357                                       ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
358                 return -EINVAL;
359         if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
360                                             &obj_handle->lm_init_tab[ae]))
361                 return -EINVAL;
362         return 0;
363 }
364
365 static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
366                                   struct icp_qat_uof_initmem *init_mem)
367 {
368         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
369         unsigned int ae, ustore_size, uaddr, i;
370
371         ustore_size = obj_handle->ustore_phy_size;
372         if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
373                 return -EINVAL;
374         if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
375                                             &obj_handle->umem_init_tab[ae]))
376                 return -EINVAL;
377         /* set the highest ustore address referenced */
378         uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
379         for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
380                 if (obj_handle->ae_data[ae].ae_slices[i].
381                     encap_image->uwords_num < uaddr)
382                         obj_handle->ae_data[ae].ae_slices[i].
383                         encap_image->uwords_num = uaddr;
384         }
385         return 0;
386 }
387
388 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
389                                    struct icp_qat_uof_initmem *init_mem)
390 {
391         switch (init_mem->region) {
392         case ICP_QAT_UOF_LMEM_REGION:
393                 if (qat_uclo_init_lmem_seg(handle, init_mem))
394                         return -EINVAL;
395                 break;
396         case ICP_QAT_UOF_UMEM_REGION:
397                 if (qat_uclo_init_umem_seg(handle, init_mem))
398                         return -EINVAL;
399                 break;
400         default:
401                 pr_err("QAT: initmem region error. region type=0x%x\n",
402                        init_mem->region);
403                 return -EINVAL;
404         }
405         return 0;
406 }
407
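/*
 * Fill the ustore of every AE assigned to this image with the image's
 * fill pattern, skipping the words that the code page itself occupies
 * (beg_addr_p .. beg_addr_p + micro_words_num - 1).
 */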
408 static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
409                                 struct icp_qat_uclo_encapme *image)
410 {
411         unsigned int i;
412         struct icp_qat_uclo_encap_page *page;
413         struct icp_qat_uof_image *uof_image;
414         unsigned char ae;
415         unsigned int ustore_size;
416         unsigned int patt_pos;
417         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
418         uint64_t *fill_data;
419
420         uof_image = image->img_ptr;
421         fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
422                             GFP_KERNEL);
423         if (!fill_data)
424                 return -ENOMEM;
425         for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
426                 memcpy(&fill_data[i], &uof_image->fill_pattern,
427                        sizeof(uint64_t));
428         page = image->page;
429
430         for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
431                 if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
432                         continue;
433                 ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
434                 patt_pos = page->beg_addr_p + page->micro_words_num;
435
436                 qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
437                                   page->beg_addr_p, &fill_data[0]);
438                 qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
439                                   ustore_size - patt_pos + 1,
440                                   &fill_data[page->beg_addr_p]);
441         }
442         kfree(fill_data);
443         return 0;
444 }
445
446 static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
447 {
448         int i, ae;
449         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
450         struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
451
452         for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
453                 if (initmem->num_in_bytes) {
454                         if (qat_uclo_init_ae_memory(handle, initmem))
455                                 return -EINVAL;
456                 }
457                 initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
458                         (uintptr_t)initmem +
459                         sizeof(struct icp_qat_uof_initmem)) +
460                         (sizeof(struct icp_qat_uof_memvar_attr) *
461                         initmem->val_attr_num));
462         }
463         for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
464                 if (qat_hal_batch_wr_lm(handle, ae,
465                                         obj_handle->lm_init_tab[ae])) {
466                         pr_err("QAT: failed to batch init lmem for AE %d\n", ae);
467                         return -EINVAL;
468                 }
469                 qat_uclo_cleanup_batch_init_list(handle,
470                                                  &obj_handle->lm_init_tab[ae]);
471                 qat_uclo_batch_wr_umem(handle, ae,
472                                        obj_handle->umem_init_tab[ae]);
473                 qat_uclo_cleanup_batch_init_list(handle,
474                                                  &obj_handle->
475                                                  umem_init_tab[ae]);
476         }
477         return 0;
478 }
479
480 static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
481                                  char *chunk_id, void *cur)
482 {
483         int i;
484         struct icp_qat_uof_chunkhdr *chunk_hdr =
485             (struct icp_qat_uof_chunkhdr *)
486             ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
487
488         for (i = 0; i < obj_hdr->num_chunks; i++) {
489                 if ((cur < (void *)&chunk_hdr[i]) &&
490                     !strncmp(chunk_hdr[i].chunk_id, chunk_id,
491                              ICP_QAT_UOF_OBJID_LEN)) {
492                         return &chunk_hdr[i];
493                 }
494         }
495         return NULL;
496 }
497
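/*
 * Fold one byte into the running 16-bit checksum; this appears to be the
 * standard CRC-16-CCITT update (polynomial 0x1021), with the result kept
 * in the low 16 bits of 'reg'.
 */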
498 static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
499 {
500         int i;
501         unsigned int topbit = 1 << 0xF;
502         unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
503
504         reg ^= inbyte << 0x8;
505         for (i = 0; i < 0x8; i++) {
506                 if (reg & topbit)
507                         reg = (reg << 1) ^ 0x1021;
508                 else
509                         reg <<= 1;
510         }
511         return reg & 0xFFFF;
512 }
513
514 static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
515 {
516         unsigned int chksum = 0;
517
518         if (ptr)
519                 while (num--)
520                         chksum = qat_uclo_calc_checksum(chksum, *ptr++);
521         return chksum;
522 }
523
524 static struct icp_qat_uclo_objhdr *
525 qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
526                    char *chunk_id)
527 {
528         struct icp_qat_uof_filechunkhdr *file_chunk;
529         struct icp_qat_uclo_objhdr *obj_hdr;
530         char *chunk;
531         int i;
532
533         file_chunk = (struct icp_qat_uof_filechunkhdr *)
534                 (buf + sizeof(struct icp_qat_uof_filehdr));
535         for (i = 0; i < file_hdr->num_chunks; i++) {
536                 if (!strncmp(file_chunk->chunk_id, chunk_id,
537                              ICP_QAT_UOF_OBJID_LEN)) {
538                         chunk = buf + file_chunk->offset;
539                         if (file_chunk->checksum != qat_uclo_calc_str_checksum(
540                                 chunk, file_chunk->size))
541                                 break;
542                         obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
543                         if (!obj_hdr)
544                                 break;
545                         obj_hdr->file_buff = chunk;
546                         obj_hdr->checksum = file_chunk->checksum;
547                         obj_hdr->size = file_chunk->size;
548                         return obj_hdr;
549                 }
550                 file_chunk++;
551         }
552         return NULL;
553 }
554
555 static unsigned int
556 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
557                             struct icp_qat_uof_image *image)
558 {
559         struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
560         struct icp_qat_uof_objtable *neigh_reg_tab;
561         struct icp_qat_uof_code_page *code_page;
562
563         code_page = (struct icp_qat_uof_code_page *)
564                         ((char *)image + sizeof(struct icp_qat_uof_image));
565         uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
566                      code_page->uc_var_tab_offset);
567         imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
568                       code_page->imp_var_tab_offset);
569         imp_expr_tab = (struct icp_qat_uof_objtable *)
570                        (encap_uof_obj->beg_uof +
571                        code_page->imp_expr_tab_offset);
572         if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
573             imp_expr_tab->entry_num) {
574                 pr_err("QAT: UOF can't contain imported variables to be parsed\n");
575                 return -EINVAL;
576         }
577         neigh_reg_tab = (struct icp_qat_uof_objtable *)
578                         (encap_uof_obj->beg_uof +
579                         code_page->neigh_reg_tab_offset);
580         if (neigh_reg_tab->entry_num) {
581                 pr_err("QAT: UOF can't contain shared control store feature\n");
582                 return -EINVAL;
583         }
584         if (image->numpages > 1) {
585                 pr_err("QAT: UOF can't contain multiple pages\n");
586                 return -EINVAL;
587         }
588         if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
589                 pr_err("QAT: UOF can't use shared control store feature\n");
590                 return -EFAULT;
591         }
592         if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
593                 pr_err("QAT: UOF can't use reloadable feature\n");
594                 return -EFAULT;
595         }
596         return 0;
597 }
598
599 static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
600                                      *encap_uof_obj,
601                                      struct icp_qat_uof_image *img,
602                                      struct icp_qat_uclo_encap_page *page)
603 {
604         struct icp_qat_uof_code_page *code_page;
605         struct icp_qat_uof_code_area *code_area;
606         struct icp_qat_uof_objtable *uword_block_tab;
607         struct icp_qat_uof_uword_block *uwblock;
608         int i;
609
610         code_page = (struct icp_qat_uof_code_page *)
611                         ((char *)img + sizeof(struct icp_qat_uof_image));
612         page->def_page = code_page->def_page;
613         page->page_region = code_page->page_region;
614         page->beg_addr_v = code_page->beg_addr_v;
615         page->beg_addr_p = code_page->beg_addr_p;
616         code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
617                                                 code_page->code_area_offset);
618         page->micro_words_num = code_area->micro_words_num;
619         uword_block_tab = (struct icp_qat_uof_objtable *)
620                           (encap_uof_obj->beg_uof +
621                           code_area->uword_block_tab);
622         page->uwblock_num = uword_block_tab->entry_num;
623         uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
624                         sizeof(struct icp_qat_uof_objtable));
625         page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
626         for (i = 0; i < uword_block_tab->entry_num; i++)
627                 page->uwblock[i].micro_words =
628                 (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
629 }
630
631 static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
632                                struct icp_qat_uclo_encapme *ae_uimage,
633                                int max_image)
634 {
635         int i, j;
636         struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
637         struct icp_qat_uof_image *image;
638         struct icp_qat_uof_objtable *ae_regtab;
639         struct icp_qat_uof_objtable *init_reg_sym_tab;
640         struct icp_qat_uof_objtable *sbreak_tab;
641         struct icp_qat_uof_encap_obj *encap_uof_obj =
642                                         &obj_handle->encap_uof_obj;
643
644         for (j = 0; j < max_image; j++) {
645                 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
646                                                 ICP_QAT_UOF_IMAG, chunk_hdr);
647                 if (!chunk_hdr)
648                         break;
649                 image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
650                                                      chunk_hdr->offset);
651                 ae_regtab = (struct icp_qat_uof_objtable *)
652                            (image->reg_tab_offset +
653                            obj_handle->obj_hdr->file_buff);
654                 ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
655                 ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
656                         (((char *)ae_regtab) +
657                         sizeof(struct icp_qat_uof_objtable));
658                 init_reg_sym_tab = (struct icp_qat_uof_objtable *)
659                                    (image->init_reg_sym_tab +
660                                    obj_handle->obj_hdr->file_buff);
661                 ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
662                 ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
663                         (((char *)init_reg_sym_tab) +
664                         sizeof(struct icp_qat_uof_objtable));
665                 sbreak_tab = (struct icp_qat_uof_objtable *)
666                         (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
667                 ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
668                 ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
669                                       (((char *)sbreak_tab) +
670                                       sizeof(struct icp_qat_uof_objtable));
671                 ae_uimage[j].img_ptr = image;
672                 if (qat_uclo_check_image_compat(encap_uof_obj, image))
673                         goto out_err;
674                 ae_uimage[j].page =
675                         kzalloc(sizeof(struct icp_qat_uclo_encap_page),
676                                 GFP_KERNEL);
677                 if (!ae_uimage[j].page)
678                         goto out_err;
679                 qat_uclo_map_image_page(encap_uof_obj, image,
680                                         ae_uimage[j].page);
681         }
682         return j;
683 out_err:
684         for (i = 0; i < j; i++)
685                 kfree(ae_uimage[i].page);
686         return 0;
687 }
688
689 static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
690 {
691         int i, ae;
692         int mflag = 0;
693         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
694
695         for (ae = 0; ae < max_ae; ae++) {
696                 if (!test_bit(ae,
697                               (unsigned long *)&handle->hal_handle->ae_mask))
698                         continue;
699                 for (i = 0; i < obj_handle->uimage_num; i++) {
700                         if (!test_bit(ae, (unsigned long *)
701                         &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
702                                 continue;
703                         mflag = 1;
704                         if (qat_uclo_init_ae_data(obj_handle, ae, i))
705                                 return -EINVAL;
706                 }
707         }
708         if (!mflag) {
709                 pr_err("QAT: uimage uses an AE that is not enabled\n");
710                 return -EINVAL;
711         }
712         return 0;
713 }
714
715 static struct icp_qat_uof_strtable *
716 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
717                        char *tab_name, struct icp_qat_uof_strtable *str_table)
718 {
719         struct icp_qat_uof_chunkhdr *chunk_hdr;
720
721         chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
722                                         obj_hdr->file_buff, tab_name, NULL);
723         if (chunk_hdr) {
724                 int hdr_size;
725
726                 memcpy(&str_table->table_len, obj_hdr->file_buff +
727                        chunk_hdr->offset, sizeof(str_table->table_len));
728                 hdr_size = (char *)&str_table->strings - (char *)str_table;
729                 str_table->strings = (uintptr_t)obj_hdr->file_buff +
730                                         chunk_hdr->offset + hdr_size;
731                 return str_table;
732         }
733         return NULL;
734 }
735
736 static void
737 qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
738                            struct icp_qat_uclo_init_mem_table *init_mem_tab)
739 {
740         struct icp_qat_uof_chunkhdr *chunk_hdr;
741
742         chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
743                                         ICP_QAT_UOF_IMEM, NULL);
744         if (chunk_hdr) {
745                 memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
746                         chunk_hdr->offset, sizeof(unsigned int));
747                 init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
748                 (encap_uof_obj->beg_uof + chunk_hdr->offset +
749                 sizeof(unsigned int));
750         }
751 }
752
753 static unsigned int
754 qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
755 {
756         switch (handle->pci_dev->device) {
757         case ADF_DH895XCC_PCI_DEVICE_ID:
758                 return ICP_QAT_AC_895XCC_DEV_TYPE;
759         case ADF_C62X_PCI_DEVICE_ID:
760                 return ICP_QAT_AC_C62X_DEV_TYPE;
761         case ADF_C3XXX_PCI_DEVICE_ID:
762                 return ICP_QAT_AC_C3XXX_DEV_TYPE;
763         default:
764                 pr_err("QAT: unsupported device 0x%x\n",
765                        handle->pci_dev->device);
766                 return 0;
767         }
768 }
769
770 static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
771 {
772         unsigned int maj_ver, prod_type = obj_handle->prod_type;
773
774         if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
775                 pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
776                        obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
777                        prod_type);
778                 return -EINVAL;
779         }
780         maj_ver = obj_handle->prod_rev & 0xff;
781         if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
782             (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
783                 pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
784                 return -EINVAL;
785         }
786         return 0;
787 }
788
789 static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
790                              unsigned char ae, unsigned char ctx_mask,
791                              enum icp_qat_uof_regtype reg_type,
792                              unsigned short reg_addr, unsigned int value)
793 {
794         switch (reg_type) {
795         case ICP_GPA_ABS:
796         case ICP_GPB_ABS:
797                 ctx_mask = 0;   /* fall through */
798         case ICP_GPA_REL:
799         case ICP_GPB_REL:
800                 return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
801                                         reg_addr, value);
802         case ICP_SR_ABS:
803         case ICP_DR_ABS:
804         case ICP_SR_RD_ABS:
805         case ICP_DR_RD_ABS:
806                 ctx_mask = 0;   /* fall through */
807         case ICP_SR_REL:
808         case ICP_DR_REL:
809         case ICP_SR_RD_REL:
810         case ICP_DR_RD_REL:
811                 return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
812                                             reg_addr, value);
813         case ICP_SR_WR_ABS:
814         case ICP_DR_WR_ABS:
815                 ctx_mask = 0;   /* fall through */
816         case ICP_SR_WR_REL:
817         case ICP_DR_WR_REL:
818                 return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
819                                             reg_addr, value);
820         case ICP_NEIGH_REL:
821                 return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
822         default:
823                 pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type);
824                 return -EFAULT;
825         }
826         return 0;
827 }
828
829 static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
830                                  unsigned int ae,
831                                  struct icp_qat_uclo_encapme *encap_ae)
832 {
833         unsigned int i;
834         unsigned char ctx_mask;
835         struct icp_qat_uof_init_regsym *init_regsym;
836
837         if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
838             ICP_QAT_UCLO_MAX_CTX)
839                 ctx_mask = 0xff;
840         else
841                 ctx_mask = 0x55;
842
843         for (i = 0; i < encap_ae->init_regsym_num; i++) {
844                 unsigned int exp_res;
845
846                 init_regsym = &encap_ae->init_regsym[i];
847                 exp_res = init_regsym->value;
848                 switch (init_regsym->init_type) {
849                 case ICP_QAT_UOF_INIT_REG:
850                         qat_uclo_init_reg(handle, ae, ctx_mask,
851                                           (enum icp_qat_uof_regtype)
852                                           init_regsym->reg_type,
853                                           (unsigned short)init_regsym->reg_addr,
854                                           exp_res);
855                         break;
856                 case ICP_QAT_UOF_INIT_REG_CTX:
857                         /* check if ctx is appropriate for the ctxMode */
858                         if (!((1 << init_regsym->ctx) & ctx_mask)) {
859                                 pr_err("QAT: invalid ctx num = 0x%x\n",
860                                        init_regsym->ctx);
861                                 return -EINVAL;
862                         }
863                         qat_uclo_init_reg(handle, ae,
864                                           (unsigned char)
865                                           (1 << init_regsym->ctx),
866                                           (enum icp_qat_uof_regtype)
867                                           init_regsym->reg_type,
868                                           (unsigned short)init_regsym->reg_addr,
869                                           exp_res);
870                         break;
871                 case ICP_QAT_UOF_INIT_EXPR:
872                         pr_err("QAT: INIT_EXPR feature not supported\n");
873                         return -EINVAL;
874                 case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
875                         pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
876                         return -EINVAL;
877                 default:
878                         break;
879                 }
880         }
881         return 0;
882 }
883
884 static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
885 {
886         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
887         unsigned int s, ae;
888
889         if (obj_handle->global_inited)
890                 return 0;
891         if (obj_handle->init_mem_tab.entry_num) {
892                 if (qat_uclo_init_memory(handle)) {
893                         pr_err("QAT: initialize memory failed\n");
894                         return -EINVAL;
895                 }
896         }
897         for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
898                 for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
899                         if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
900                                 continue;
901                         if (qat_uclo_init_reg_sym(handle, ae,
902                                                   obj_handle->ae_data[ae].
903                                                   ae_slices[s].encap_image))
904                                 return -EINVAL;
905                 }
906         }
907         obj_handle->global_inited = 1;
908         return 0;
909 }
910
911 static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
912 {
913         unsigned char ae, nn_mode, s;
914         struct icp_qat_uof_image *uof_image;
915         struct icp_qat_uclo_aedata *ae_data;
916         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
917
918         for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
919                 if (!test_bit(ae,
920                               (unsigned long *)&handle->hal_handle->ae_mask))
921                         continue;
922                 ae_data = &obj_handle->ae_data[ae];
923                 for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
924                                       ICP_QAT_UCLO_MAX_CTX); s++) {
925                         if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
926                                 continue;
927                         uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
928                         if (qat_hal_set_ae_ctx_mode(handle, ae,
929                                                     (char)ICP_QAT_CTX_MODE
930                                                     (uof_image->ae_mode))) {
931                                 pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
932                                 return -EFAULT;
933                         }
934                         nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
935                         if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
936                                 pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
937                                 return -EFAULT;
938                         }
939                         if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
940                                                    (char)ICP_QAT_LOC_MEM0_MODE
941                                                    (uof_image->ae_mode))) {
942                                 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
943                                 return -EFAULT;
944                         }
945                         if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
946                                                    (char)ICP_QAT_LOC_MEM1_MODE
947                                                    (uof_image->ae_mode))) {
948                                 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
949                                 return -EFAULT;
950                         }
951                 }
952         }
953         return 0;
954 }
955
956 static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
957 {
958         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
959         struct icp_qat_uclo_encapme *image;
960         int a;
961
962         for (a = 0; a < obj_handle->uimage_num; a++) {
963                 image = &obj_handle->ae_uimage[a];
964                 image->uwords_num = image->page->beg_addr_p +
965                                         image->page->micro_words_num;
966         }
967 }
968
969 static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
970 {
971         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
972         unsigned int ae;
973
974         obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
975         obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
976                                              obj_handle->obj_hdr->file_buff;
977         obj_handle->uword_in_bytes = 6;
978         obj_handle->prod_type = qat_uclo_get_dev_type(handle);
979         obj_handle->prod_rev = PID_MAJOR_REV |
980                         (PID_MINOR_REV & handle->hal_handle->revision_id);
981         if (qat_uclo_check_uof_compat(obj_handle)) {
982                 pr_err("QAT: UOF incompatible\n");
983                 return -EINVAL;
984         }
985         obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
986                                         GFP_KERNEL);
987         if (!obj_handle->uword_buf)
988                 return -ENOMEM;
989         obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
990         if (!obj_handle->obj_hdr->file_buff ||
991             !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
992                                     &obj_handle->str_table)) {
993                 pr_err("QAT: UOF doesn't have effective images\n");
994                 goto out_err;
995         }
996         obj_handle->uimage_num =
997                 qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
998                                     ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
999         if (!obj_handle->uimage_num)
1000                 goto out_err;
1001         if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
1002                 pr_err("QAT: Bad object\n");
1003                 goto out_check_uof_aemask_err;
1004         }
1005         qat_uclo_init_uword_num(handle);
1006         qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
1007                                    &obj_handle->init_mem_tab);
1008         if (qat_uclo_set_ae_mode(handle))
1009                 goto out_check_uof_aemask_err;
1010         return 0;
1011 out_check_uof_aemask_err:
1012         for (ae = 0; ae < obj_handle->uimage_num; ae++)
1013                 kfree(obj_handle->ae_uimage[ae].page);
1014 out_err:
1015         kfree(obj_handle->uword_buf);
1016         return -EFAULT;
1017 }
1018
1019 static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
1020                                       struct icp_qat_suof_filehdr *suof_ptr,
1021                                       int suof_size)
1022 {
1023         unsigned int check_sum = 0;
1024         unsigned int min_ver_offset = 0;
1025         struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1026
1027         suof_handle->file_id = ICP_QAT_SUOF_FID;
1028         suof_handle->suof_buf = (char *)suof_ptr;
1029         suof_handle->suof_size = suof_size;
1030         min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
1031                                               min_ver);
1032         check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
1033                                                min_ver_offset);
1034         if (check_sum != suof_ptr->check_sum) {
1035                 pr_err("QAT: incorrect SUOF checksum\n");
1036                 return -EINVAL;
1037         }
1038         suof_handle->check_sum = suof_ptr->check_sum;
1039         suof_handle->min_ver = suof_ptr->min_ver;
1040         suof_handle->maj_ver = suof_ptr->maj_ver;
1041         suof_handle->fw_type = suof_ptr->fw_type;
1042         return 0;
1043 }
1044
1045 static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle,
1046                               struct icp_qat_suof_img_hdr *suof_img_hdr,
1047                               struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
1048 {
1049         struct icp_qat_simg_ae_mode *ae_mode;
1050         struct icp_qat_suof_objhdr *suof_objhdr;
1051
1052         suof_img_hdr->simg_buf  = (suof_handle->suof_buf +
1053                                    suof_chunk_hdr->offset +
1054                                    sizeof(*suof_objhdr));
1055         suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
1056                                   (suof_handle->suof_buf +
1057                                    suof_chunk_hdr->offset))->img_length;
1058
1059         suof_img_hdr->css_header = suof_img_hdr->simg_buf;
1060         suof_img_hdr->css_key = (suof_img_hdr->css_header +
1061                                  sizeof(struct icp_qat_css_hdr));
1062         suof_img_hdr->css_signature = suof_img_hdr->css_key +
1063                                       ICP_QAT_CSS_FWSK_MODULUS_LEN +
1064                                       ICP_QAT_CSS_FWSK_EXPONENT_LEN;
1065         suof_img_hdr->css_simg = suof_img_hdr->css_signature +
1066                                  ICP_QAT_CSS_SIGNATURE_LEN;
1067
1068         ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
1069         suof_img_hdr->ae_mask = ae_mode->ae_mask;
1070         suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
1071         suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
1072         suof_img_hdr->fw_type = ae_mode->fw_type;
1073 }
1074
1075 static void
1076 qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
1077                           struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
1078 {
1079         char **sym_str = (char **)&suof_handle->sym_str;
1080         unsigned int *sym_size = &suof_handle->sym_size;
1081         struct icp_qat_suof_strtable *str_table_obj;
1082
1083         *sym_size = *(unsigned int *)(uintptr_t)
1084                    (suof_chunk_hdr->offset + suof_handle->suof_buf);
1085         *sym_str = (char *)(uintptr_t)
1086                    (suof_handle->suof_buf + suof_chunk_hdr->offset +
1087                    sizeof(str_table_obj->tab_length));
1088 }
1089
1090 static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
1091                                       struct icp_qat_suof_img_hdr *img_hdr)
1092 {
1093         struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
1094         unsigned int prod_rev, maj_ver, prod_type;
1095
1096         prod_type = qat_uclo_get_dev_type(handle);
1097         img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
1098         prod_rev = PID_MAJOR_REV |
1099                          (PID_MINOR_REV & handle->hal_handle->revision_id);
1100         if (img_ae_mode->dev_type != prod_type) {
1101                 pr_err("QAT: incompatible product type %x\n",
1102                        img_ae_mode->dev_type);
1103                 return -EINVAL;
1104         }
1105         maj_ver = prod_rev & 0xff;
1106         if ((maj_ver > img_ae_mode->devmax_ver) ||
1107             (maj_ver < img_ae_mode->devmin_ver)) {
1108                 pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
1109                 return -EINVAL;
1110         }
1111         return 0;
1112 }
1113
1114 static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
1115 {
1116         struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
1117
1118         kfree(sobj_handle->img_table.simg_hdr);
1119         sobj_handle->img_table.simg_hdr = NULL;
1120         kfree(handle->sobj_handle);
1121         handle->sobj_handle = NULL;
1122 }
1123
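/*
 * Swap the image header at index img_id with the last entry of the table;
 * the caller uses this to move the image that targets AE0 to the end of
 * the image table.
 */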
1124 static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
1125                               unsigned int img_id, unsigned int num_simgs)
1126 {
1127         struct icp_qat_suof_img_hdr img_header;
1128
1129         if (img_id != num_simgs - 1) {
1130                 memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
1131                        sizeof(*suof_img_hdr));
1132                 memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
1133                        sizeof(*suof_img_hdr));
1134                 memcpy(&suof_img_hdr[img_id], &img_header,
1135                        sizeof(*suof_img_hdr));
1136         }
1137 }
1138
1139 static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
1140                              struct icp_qat_suof_filehdr *suof_ptr,
1141                              int suof_size)
1142 {
1143         struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1144         struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
1145         struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
1146         int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
1147         unsigned int i = 0;
1148         struct icp_qat_suof_img_hdr img_header;
1149
1150         if (!suof_ptr || (suof_size == 0)) {
1151                 pr_err("QAT: input parameter SUOF pointer/size is invalid\n");
1152                 return -EINVAL;
1153         }
1154         if (qat_uclo_check_suof_format(suof_ptr))
1155                 return -EINVAL;
1156         ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
1157         if (ret)
1158                 return ret;
1159         suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
1160                          ((uintptr_t)suof_ptr + sizeof(*suof_ptr));
1161
1162         qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
1163         suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
1164
1165         if (suof_handle->img_table.num_simgs != 0) {
1166                 suof_img_hdr = kzalloc(suof_handle->img_table.num_simgs *
1167                                        sizeof(img_header), GFP_KERNEL);
1168                 if (!suof_img_hdr)
1169                         return -ENOMEM;
1170                 suof_handle->img_table.simg_hdr = suof_img_hdr;
1171         }
1172
1173         for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
1174                 qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i],
1175                                   &suof_chunk_hdr[1 + i]);
1176                 ret = qat_uclo_check_simg_compat(handle,
1177                                                  &suof_img_hdr[i]);
1178                 if (ret)
1179                         return ret;
1180                 if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
1181                         ae0_img = i;
1182         }
1183         qat_uclo_tail_img(suof_img_hdr, ae0_img,
1184                           suof_handle->img_table.num_simgs);
1185         return 0;
1186 }
1187
1188 #define ADD_ADDR(high, low)  ((((uint64_t)high) << 32) + low)
1189 #define BITS_IN_DWORD 32
1190
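/*
 * Kick off firmware authentication: program the bus address of the
 * authentication descriptor region into the FCU DRAM address CSRs, issue
 * the AUTH command, then poll FCU_STATUS until verification succeeds,
 * fails, or FW_AUTH_MAX_RETRY polls have elapsed.
 */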
1191 static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
1192                             struct icp_qat_fw_auth_desc *desc)
1193 {
1194         unsigned int fcu_sts, retry = 0;
1195         u64 bus_addr;
1196
1197         bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
1198                            - sizeof(struct icp_qat_auth_chunk);
1199         SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD));
1200         SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr);
1201         SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH);
1202
1203         do {
1204                 msleep(FW_AUTH_WAIT_PERIOD);
1205                 fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
1206                 if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
1207                         goto auth_fail;
1208                 if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
1209                         if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
1210                                 return 0;
1211         } while (retry++ < FW_AUTH_MAX_RETRY);
1212 auth_fail:
1213         pr_err("QAT: authentication error (FCU_STATUS = 0x%x), retry = %d\n",
1214                fcu_sts & FCU_AUTH_STS_MASK, retry);
1215         return -EINVAL;
1216 }
1217
1218 static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
1219                                struct icp_firml_dram_desc *dram_desc,
1220                                unsigned int size)
1221 {
1222         void *vptr;
1223         dma_addr_t ptr;
1224
1225         vptr = dma_alloc_coherent(&handle->pci_dev->dev,
1226                                   size, &ptr, GFP_KERNEL);
1227         if (!vptr)
1228                 return -ENOMEM;
1229         dram_desc->dram_base_addr_v = vptr;
1230         dram_desc->dram_bus_addr = ptr;
1231         dram_desc->dram_size = size;
1232         return 0;
1233 }
1234
1235 static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
1236                                struct icp_firml_dram_desc *dram_desc)
1237 {
1238         dma_free_coherent(&handle->pci_dev->dev,
1239                           (size_t)(dram_desc->dram_size),
1240                           (dram_desc->dram_base_addr_v),
1241                           dram_desc->dram_bus_addr);
1242         memset(dram_desc, 0, sizeof(*dram_desc));
1243 }
1244
1245 static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
1246                                    struct icp_qat_fw_auth_desc **desc)
1247 {
1248         struct icp_firml_dram_desc dram_desc;
1249
1250         dram_desc.dram_base_addr_v = *desc;
1251         dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
1252                                    (*desc))->chunk_bus_addr;
1253         dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
1254                                (*desc))->chunk_size;
1255         qat_uclo_simg_free(handle, &dram_desc);
1256 }
1257
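/*
 * Build the DMA region handed to the FCU for authentication. The layout
 * written below is: auth chunk bookkeeping, CSS header, FWSK public key
 * (modulus, zero padding, exponent), CSS signature, then the firmware
 * image itself; the descriptor records the bus address of each part.
 */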
1258 static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
1259                                 char *image, unsigned int size,
1260                                 struct icp_qat_fw_auth_desc **desc)
1261 {
1262         struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
1263         struct icp_qat_fw_auth_desc *auth_desc;
1264         struct icp_qat_auth_chunk *auth_chunk;
1265         u64 virt_addr,  bus_addr, virt_base;
1266         unsigned int length, simg_offset = sizeof(*auth_chunk);
1267         struct icp_firml_dram_desc img_desc;
1268
1269         if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
1270                 pr_err("QAT: error, input image size overflow %d\n", size);
1271                 return -EINVAL;
1272         }
1273         length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
1274                  ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
1275                  size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
1276         if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
1277                 pr_err("QAT: error, failed to allocate contiguous DRAM\n");
1278                 return -ENOMEM;
1279         }
1280
1281         auth_chunk = img_desc.dram_base_addr_v;
1282         auth_chunk->chunk_size = img_desc.dram_size;
1283         auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
1284         virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
1285         bus_addr  = img_desc.dram_bus_addr + simg_offset;
1286         auth_desc = img_desc.dram_base_addr_v;
1287         auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1288         auth_desc->css_hdr_low = (unsigned int)bus_addr;
1289         virt_addr = virt_base;
1290
1291         memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
1292         /* pub key */
1293         bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
1294                            sizeof(*css_hdr);
1295         virt_addr = virt_addr + sizeof(*css_hdr);
1296
1297         auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1298         auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
1299
1300         memcpy((void *)(uintptr_t)virt_addr,
1301                (void *)(image + sizeof(*css_hdr)),
1302                ICP_QAT_CSS_FWSK_MODULUS_LEN);
1303         /* padding */
1304         memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN),
1305                0, ICP_QAT_CSS_FWSK_PAD_LEN);
1306
1307         /* exponent */
1308         memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
1309                ICP_QAT_CSS_FWSK_PAD_LEN),
1310                (void *)(image + sizeof(*css_hdr) +
1311                         ICP_QAT_CSS_FWSK_MODULUS_LEN),
1312                sizeof(unsigned int));
1313
1314         /* signature */
1315         bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
1316                             auth_desc->fwsk_pub_low) +
1317                    ICP_QAT_CSS_FWSK_PUB_LEN;
1318         virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
1319         auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1320         auth_desc->signature_low = (unsigned int)bus_addr;
1321
1322         memcpy((void *)(uintptr_t)virt_addr,
1323                (void *)(image + sizeof(*css_hdr) +
1324                ICP_QAT_CSS_FWSK_MODULUS_LEN +
1325                ICP_QAT_CSS_FWSK_EXPONENT_LEN),
1326                ICP_QAT_CSS_SIGNATURE_LEN);
1327
1328         bus_addr = ADD_ADDR(auth_desc->signature_high,
1329                             auth_desc->signature_low) +
1330                    ICP_QAT_CSS_SIGNATURE_LEN;
1331         virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;
1332
1333         auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1334         auth_desc->img_low = (unsigned int)bus_addr;
1335         auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
1336         memcpy((void *)(uintptr_t)virt_addr,
1337                (void *)(image + ICP_QAT_AE_IMG_OFFSET),
1338                auth_desc->img_len);
1339         virt_addr = virt_base;
1340         /* AE firmware */
1341         if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
1342             CSS_AE_FIRMWARE) {
1343                 auth_desc->img_ae_mode_data_high = auth_desc->img_high;
1344                 auth_desc->img_ae_mode_data_low = auth_desc->img_low;
1345                 bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
1346                                     auth_desc->img_ae_mode_data_low) +
1347                            sizeof(struct icp_qat_simg_ae_mode);
1348
1349                 auth_desc->img_ae_init_data_high = (unsigned int)
1350                                                  (bus_addr >> BITS_IN_DWORD);
1351                 auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
1352                 bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
1353                 auth_desc->img_ae_insts_high = (unsigned int)
1354                                              (bus_addr >> BITS_IN_DWORD);
1355                 auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
1356         } else {
1357                 auth_desc->img_ae_insts_high = auth_desc->img_high;
1358                 auth_desc->img_ae_insts_low = auth_desc->img_low;
1359         }
1360         *desc = auth_desc;
1361         return 0;
1362 }
1363
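/*
 * Load an authenticated image into the AEs selected by its ae_mask:
 * issue FCU_CTRL_CMD_LOAD per AE and poll FCU_STATUS for LOAD_DONE
 * with the corresponding "loaded" bit set.
 */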
1364 static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
1365                             struct icp_qat_fw_auth_desc *desc)
1366 {
1367         unsigned int i;
1368         unsigned int fcu_sts;
1369         struct icp_qat_simg_ae_mode *virt_addr;
1370         unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
1371
1372         virt_addr = (void *)((uintptr_t)desc +
1373                      sizeof(struct icp_qat_auth_chunk) +
1374                      sizeof(struct icp_qat_css_hdr) +
1375                      ICP_QAT_CSS_FWSK_PUB_LEN +
1376                      ICP_QAT_CSS_SIGNATURE_LEN);
1377         for (i = 0; i < handle->hal_handle->ae_max_num; i++) {
1378                 int retry = 0;
1379
1380                 if (!((virt_addr->ae_mask >> i) & 0x1))
1381                         continue;
1382                 if (qat_hal_check_ae_active(handle, i)) {
1383                         pr_err("QAT: AE %d is active\n", i);
1384                         return -EINVAL;
1385                 }
1386                 SET_CAP_CSR(handle, FCU_CONTROL,
1387                             (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));
1388
1389                 do {
1390                         msleep(FW_AUTH_WAIT_PERIOD);
1391                         fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
1392                         if (((fcu_sts & FCU_AUTH_STS_MASK) ==
1393                             FCU_STS_LOAD_DONE) &&
1394                             ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i)))
1395                                 break;
1396                 } while (retry++ < FW_AUTH_MAX_RETRY);
1397                 if (retry > FW_AUTH_MAX_RETRY) {
1398                         pr_err("QAT: firmware load timed out (retry = %d)\n", retry);
1399                         return -EINVAL;
1400                 }
1401         }
1402         return 0;
1403 }
1404
1405 static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
1406                                  void *addr_ptr, int mem_size)
1407 {
1408         struct icp_qat_suof_handle *suof_handle;
1409
1410         suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
1411         if (!suof_handle)
1412                 return -ENOMEM;
1413         handle->sobj_handle = suof_handle;
1414         if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
1415                 qat_uclo_del_suof(handle);
1416                 pr_err("QAT: map SUOF failed\n");
1417                 return -EINVAL;
1418         }
1419         return 0;
1420 }
1421
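/*
 * Write the MMP image.  With firmware authentication the image is
 * mapped and verified through the FCU; otherwise it is written
 * directly to SRAM (not supported on C3XXX devices).
 */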
1422 int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
1423                        void *addr_ptr, int mem_size)
1424 {
1425         struct icp_qat_fw_auth_desc *desc = NULL;
1426         int status = 0;
1427
1428         if (handle->fw_auth) {
1429                 if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc))
1430                         status = qat_uclo_auth_fw(handle, desc);
1431                 qat_uclo_ummap_auth_fw(handle, &desc);
1432         } else {
1433                 if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
1434                         pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
1435                         return -EINVAL;
1436                 }
1437                 qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
1438         }
1439         return status;
1440 }
1441
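/*
 * Duplicate the UOF buffer, validate its file header, locate the
 * ICP_QAT_UOF_OBJS chunk and parse it into the object handle.
 */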
1442 static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
1443                                 void *addr_ptr, int mem_size)
1444 {
1445         struct icp_qat_uof_filehdr *filehdr;
1446         struct icp_qat_uclo_objhandle *objhdl;
1447
1448         objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
1449         if (!objhdl)
1450                 return -ENOMEM;
1451         objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
1452         if (!objhdl->obj_buf)
1453                 goto out_objbuf_err;
1454         filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
1455         if (qat_uclo_check_uof_format(filehdr))
1456                 goto out_objhdr_err;
1457         objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
1458                                              ICP_QAT_UOF_OBJS);
1459         if (!objhdl->obj_hdr) {
1460                 pr_err("QAT: object file chunk is null\n");
1461                 goto out_objhdr_err;
1462         }
1463         handle->obj_handle = objhdl;
1464         if (qat_uclo_parse_uof_obj(handle))
1465                 goto out_overlay_obj_err;
1466         return 0;
1467
1468 out_overlay_obj_err:
1469         handle->obj_handle = NULL;
1470         kfree(objhdl->obj_hdr);
1471 out_objhdr_err:
1472         kfree(objhdl->obj_buf);
1473 out_objbuf_err:
1474         kfree(objhdl);
1475         return -ENOMEM;
1476 }
1477
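/*
 * Map a firmware object: SUOF when firmware authentication is in use,
 * plain UOF otherwise.  Callers (e.g. the adf firmware loader) are
 * expected to follow this with qat_uclo_wr_all_uimage() and release
 * the mapping with qat_uclo_del_uof_obj().
 */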
1478 int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
1479                      void *addr_ptr, int mem_size)
1480 {
1481         BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
1482                      (sizeof(handle->hal_handle->ae_mask) * 8));
1483
1484         if (!handle || !addr_ptr || mem_size < 24)
1485                 return -EINVAL;
1486
1487         return (handle->fw_auth) ?
1488                         qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) :
1489                         qat_uclo_map_uof_obj(handle, addr_ptr, mem_size);
1490 }
1491
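/*
 * Tear down a mapped UOF object: drop any SUOF handle, then free the
 * uword buffer, per-image pages, per-AE data and the object handle.
 */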
1492 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
1493 {
1494         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1495         unsigned int a;
1496
1497         if (handle->sobj_handle)
1498                 qat_uclo_del_suof(handle);
1499         if (!obj_handle)
1500                 return;
1501
1502         kfree(obj_handle->uword_buf);
1503         for (a = 0; a < obj_handle->uimage_num; a++)
1504                 kfree(obj_handle->ae_uimage[a].page);
1505
1506         for (a = 0; a < handle->hal_handle->ae_max_num; a++)
1507                 qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
1508
1509         kfree(obj_handle->obj_hdr);
1510         kfree(obj_handle->obj_buf);
1511         kfree(obj_handle);
1512         handle->obj_handle = NULL;
1513 }
1514
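/*
 * Fetch the microword at relative address @raddr from the page's uword
 * blocks.  If no page is supplied, or the stored word is the
 * INVLD_UWORD marker, the image fill pattern is returned instead.
 */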
1515 static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
1516                                  struct icp_qat_uclo_encap_page *encap_page,
1517                                  uint64_t *uword, unsigned int addr_p,
1518                                  unsigned int raddr, uint64_t fill)
1519 {
1520         uint64_t uwrd = 0;
1521         unsigned int i;
1522
1523         if (!encap_page) {
1524                 *uword = fill;
1525                 return;
1526         }
1527         for (i = 0; i < encap_page->uwblock_num; i++) {
1528                 if (raddr >= encap_page->uwblock[i].start_addr &&
1529                     raddr <= encap_page->uwblock[i].start_addr +
1530                     encap_page->uwblock[i].words_num - 1) {
1531                         raddr -= encap_page->uwblock[i].start_addr;
1532                         raddr *= obj_handle->uword_in_bytes;
1533                         memcpy(&uwrd, (void *)(((uintptr_t)
1534                                encap_page->uwblock[i].micro_words) + raddr),
1535                                obj_handle->uword_in_bytes);
1536                         uwrd = uwrd & 0xbffffffffffull;
1537                 }
1538         }
1539         *uword = uwrd;
1540         if (*uword == INVLD_UWORD)
1541                 *uword = fill;
1542 }
1543
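/*
 * Write one encapsulated page to an AE's ustore, staging up to
 * UWORD_CPYBUF_SIZE microwords at a time in obj_handle->uword_buf.
 */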
1544 static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
1545                                         struct icp_qat_uclo_encap_page
1546                                         *encap_page, unsigned int ae)
1547 {
1548         unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
1549         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1550         uint64_t fill_pat;
1551
1552         /* load the page starting at appropriate ustore address */
1553         /* get fill-pattern from an image -- they are all the same */
1554         memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
1555                sizeof(uint64_t));
1556         uw_physical_addr = encap_page->beg_addr_p;
1557         uw_relative_addr = 0;
1558         words_num = encap_page->micro_words_num;
1559         while (words_num) {
1560                 cpylen = min_t(unsigned int, words_num, UWORD_CPYBUF_SIZE);
1564
1565                 /* load the buffer */
1566                 for (i = 0; i < cpylen; i++)
1567                         qat_uclo_fill_uwords(obj_handle, encap_page,
1568                                              &obj_handle->uword_buf[i],
1569                                              uw_physical_addr + i,
1570                                              uw_relative_addr + i, fill_pat);
1571
1572                 /* copy the buffer to ustore */
1573                 qat_hal_wr_uwords(handle, (unsigned char)ae,
1574                                   uw_physical_addr, cpylen,
1575                                   obj_handle->uword_buf);
1576
1577                 uw_physical_addr += cpylen;
1578                 uw_relative_addr += cpylen;
1579                 words_num -= cpylen;
1580         }
1581 }
1582
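/*
 * Write an image's default page to every AE it is assigned to, set the
 * current page for the assigned contexts and point their PC at the
 * image entry address.
 */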
1583 static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
1584                                     struct icp_qat_uof_image *image)
1585 {
1586         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1587         unsigned int ctx_mask, s;
1588         struct icp_qat_uclo_page *page;
1589         unsigned char ae;
1590         int ctx;
1591
1592         if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
1593                 ctx_mask = 0xff;
1594         else
1595                 ctx_mask = 0x55;
1596         /* load the default page and set assigned CTX PC
1597          * to the entrypoint address */
1598         for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
1599                 if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
1600                         continue;
1601                 /* find the slice to which this image is assigned */
1602                 for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
1603                         if (image->ctx_assigned & obj_handle->ae_data[ae].
1604                             ae_slices[s].ctx_mask_assigned)
1605                                 break;
1606                 }
1607                 if (s >= obj_handle->ae_data[ae].slice_num)
1608                         continue;
1609                 page = obj_handle->ae_data[ae].ae_slices[s].page;
1610                 if (!page->encap_page->def_page)
1611                         continue;
1612                 qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
1613
1614                 page = obj_handle->ae_data[ae].ae_slices[s].page;
1615                 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
1616                         obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
1617                                         (ctx_mask & (1 << ctx)) ? page : NULL;
1618                 qat_hal_set_live_ctx(handle, (unsigned char)ae,
1619                                      image->ctx_assigned);
1620                 qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
1621                                image->entry_address);
1622         }
1623 }
1624
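/*
 * For each signed image in the SUOF image table: map it for
 * authentication, have the FCU verify it, load it into the AEs and
 * release the DMA buffer.
 */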
1625 static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
1626 {
1627         unsigned int i;
1628         struct icp_qat_fw_auth_desc *desc = NULL;
1629         struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
1630         struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
1631
1632         for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
1633                 if (qat_uclo_map_auth_fw(handle,
1634                                          (char *)simg_hdr[i].simg_buf,
1635                                          (unsigned int)
1636                                          (simg_hdr[i].simg_len),
1637                                          &desc))
1638                         goto wr_err;
1639                 if (qat_uclo_auth_fw(handle, desc))
1640                         goto wr_err;
1641                 if (qat_uclo_load_fw(handle, desc))
1642                         goto wr_err;
1643                 qat_uclo_ummap_auth_fw(handle, &desc);
1644         }
1645         return 0;
1646 wr_err:
1647         qat_uclo_ummap_auth_fw(handle, &desc);
1648         return -EINVAL;
1649 }
1650
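/*
 * Write all mapped UOF images: run global initialization, then
 * initialize each image's ustore and write its pages.
 */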
1651 static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
1652 {
1653         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1654         unsigned int i;
1655
1656         if (qat_uclo_init_globals(handle))
1657                 return -EINVAL;
1658         for (i = 0; i < obj_handle->uimage_num; i++) {
1659                 if (!obj_handle->ae_uimage[i].img_ptr)
1660                         return -EINVAL;
1661                 if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
1662                         return -EINVAL;
1663                 qat_uclo_wr_uimage_page(handle,
1664                                         obj_handle->ae_uimage[i].img_ptr);
1665         }
1666         return 0;
1667 }
1668
1669 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
1670 {
1671         return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
1672                                    qat_uclo_wr_uof_img(handle);
1673 }