/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
#include "icp_qat_hal.h"
#include "icp_qat_fw_loader_handle.h"

#define UWORD_CPYBUF_SIZE 1024
#define INVLD_UWORD 0xffffffffffull
#define PID_MINOR_REV 0xf
#define PID_MAJOR_REV (0xf << 4)

static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
				 unsigned int ae, unsigned int image_num)
{
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_encapme *encap_image;
	struct icp_qat_uclo_page *page = NULL;
	struct icp_qat_uclo_aeslice *ae_slice = NULL;

	ae_data = &obj_handle->ae_data[ae];
	encap_image = &obj_handle->ae_uimage[image_num];
	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
	ae_slice->encap_image = encap_image;

	if (encap_image->img_ptr) {
		ae_slice->ctx_mask_assigned =
					encap_image->img_ptr->ctx_assigned;
		ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
	} else {
		ae_slice->ctx_mask_assigned = 0;
	}
	ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
	if (!ae_slice->region)
		return -ENOMEM;
	ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
	if (!ae_slice->page)
		goto out_err;
	page = ae_slice->page;
	page->encap_page = encap_image->page;
	ae_slice->page->region = ae_slice->region;
	ae_data->slice_num++;
	return 0;
out_err:
	kfree(ae_slice->region);
	ae_slice->region = NULL;
	return -ENOMEM;
}

static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
{
	unsigned int i;

	if (!ae_data) {
		pr_err("QAT: bad argument, ae_data is NULL\n");
		return -EINVAL;
	}

	for (i = 0; i < ae_data->slice_num; i++) {
		kfree(ae_data->ae_slices[i].region);
		ae_data->ae_slices[i].region = NULL;
		kfree(ae_data->ae_slices[i].page);
		ae_data->ae_slices[i].page = NULL;
	}
	return 0;
}

static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
				 unsigned int str_offset)
{
	if ((!str_table->table_len) || (str_offset > str_table->table_len))
		return NULL;
	return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
}

static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
{
	int maj = hdr->maj_ver & 0xff;
	int min = hdr->min_ver & 0xff;

	if (hdr->file_id != ICP_QAT_UOF_FID) {
		pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
		return -EINVAL;
	}
	if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
		pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
		       maj, min);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
{
	int maj = suof_hdr->maj_ver & 0xff;
	int min = suof_hdr->min_ver & 0xff;

	if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
		pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
		return -EINVAL;
	}
	if (suof_hdr->fw_type != 0) {
		pr_err("QAT: unsupported firmware type\n");
		return -EINVAL;
	}
	if (suof_hdr->num_chunks <= 0x1) {
		pr_err("QAT: SUOF chunk amount is incorrect\n");
		return -EINVAL;
	}
	if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
		pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
		       maj, min);
		return -EINVAL;
	}
	return 0;
}

static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned int addr, unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
}

static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned int addr,
				      unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
}

static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae,
				   struct icp_qat_uof_batch_init
				   *umem_init_header)
{
	struct icp_qat_uof_batch_init *umem_init;

	if (!umem_init_header)
		return;
	umem_init = umem_init_header->next;
	while (umem_init) {
		unsigned int addr, *value, size;

		ae = umem_init->ae;
		addr = umem_init->addr;
		value = umem_init->value;
		size = umem_init->size;
		qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
		umem_init = umem_init->next;
	}
}

static void
qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
				 struct icp_qat_uof_batch_init **base)
{
	struct icp_qat_uof_batch_init *umem_init;

	umem_init = *base;
	while (umem_init) {
		struct icp_qat_uof_batch_init *pre;

		pre = umem_init;
		umem_init = umem_init->next;
		kfree(pre);
	}
	*base = NULL;
}

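/*
 * The UOF init-mem table identifies the target AE by an ASCII decimal
 * string (the symbol name); parse the leading digits (at most 15
 * characters are considered) into *num.
 */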
static int qat_uclo_parse_num(char *str, unsigned int *num)
{
	char buf[16] = {0};
	unsigned long ae = 0;
	int i;

	strncpy(buf, str, 15);
	for (i = 0; i < 16; i++) {
		if (!isdigit(buf[i])) {
			buf[i] = '\0';
			break;
		}
	}
	if ((kstrtoul(buf, 10, &ae)))
		return -EFAULT;

	*num = (unsigned int)ae;
	return 0;
}

static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
				     struct icp_qat_uof_initmem *init_mem,
				     unsigned int size_range, unsigned int *ae)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	char *str;

	if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
		pr_err("QAT: initmem is out of range\n");
		return -EINVAL;
	}
	if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
		pr_err("QAT: Memory scope for init_mem error\n");
		return -EINVAL;
	}
	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
	if (!str) {
		pr_err("QAT: AE name assigned in UOF init table is NULL\n");
		return -EINVAL;
	}
	if (qat_uclo_parse_num(str, ae)) {
		pr_err("QAT: Parse num for AE number failed\n");
		return -EINVAL;
	}
	if (*ae >= ICP_QAT_UCLO_MAX_AE) {
		pr_err("QAT: ae %d out of range\n", *ae);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
					   *handle, struct icp_qat_uof_initmem
					   *init_mem, unsigned int ae,
					   struct icp_qat_uof_batch_init
					   **init_tab_base)
{
	struct icp_qat_uof_batch_init *init_header, *tail;
	struct icp_qat_uof_batch_init *mem_init, *tail_old;
	struct icp_qat_uof_memvar_attr *mem_val_attr;
	unsigned int i, flag = 0;

	mem_val_attr =
		(struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
		sizeof(struct icp_qat_uof_initmem));

	init_header = *init_tab_base;
	if (!init_header) {
		init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
		if (!init_header)
			return -ENOMEM;
		init_header->size = 1;
		*init_tab_base = init_header;
		flag = 1;
	}
	tail_old = init_header;
	while (tail_old->next)
		tail_old = tail_old->next;
	tail = tail_old;
	for (i = 0; i < init_mem->val_attr_num; i++) {
		mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
		if (!mem_init)
			goto out_err;
		mem_init->ae = ae;
		mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
		mem_init->value = &mem_val_attr->value;
		mem_init->size = 4;
		mem_init->next = NULL;
		tail->next = mem_init;
		tail = mem_init;
		init_header->size += qat_hal_get_ins_num();
		mem_val_attr++;
	}
	return 0;
out_err:
	/* Do not free the list head unless we allocated it. */
	tail_old = tail_old->next;
	if (flag) {
		kfree(*init_tab_base);
		*init_tab_base = NULL;
	}

	while (tail_old) {
		mem_init = tail_old->next;
		kfree(tail_old);
		tail_old = mem_init;
	}
	return -ENOMEM;
}

static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	if (qat_uclo_fetch_initmem_ae(handle, init_mem,
				      ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->lm_init_tab[ae]))
		return -EINVAL;
	return 0;
}

static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae, ustore_size, uaddr, i;

	ustore_size = obj_handle->ustore_phy_size;
	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->umem_init_tab[ae]))
		return -EINVAL;
	/* set the highest ustore address referenced */
	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
	for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
		if (obj_handle->ae_data[ae].ae_slices[i].
		    encap_image->uwords_num < uaddr)
			obj_handle->ae_data[ae].ae_slices[i].
			encap_image->uwords_num = uaddr;
	}
	return 0;
}

static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_uof_initmem *init_mem)
{
	switch (init_mem->region) {
	case ICP_QAT_UOF_LMEM_REGION:
		if (qat_uclo_init_lmem_seg(handle, init_mem))
			return -EINVAL;
		break;
	case ICP_QAT_UOF_UMEM_REGION:
		if (qat_uclo_init_umem_seg(handle, init_mem))
			return -EINVAL;
		break;
	default:
		pr_err("QAT: initmem region error. region type=0x%x\n",
		       init_mem->region);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
				struct icp_qat_uclo_encapme *image)
{
	unsigned int i;
	struct icp_qat_uclo_encap_page *page;
	struct icp_qat_uof_image *uof_image;
	unsigned char ae;
	unsigned int ustore_size;
	unsigned int patt_pos;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t *fill_data;

	uof_image = image->img_ptr;
	fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
			    GFP_KERNEL);
	if (!fill_data)
		return -ENOMEM;
	for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
		memcpy(&fill_data[i], &uof_image->fill_pattern,
		       sizeof(uint64_t));
	page = image->page;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
			continue;
		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
		patt_pos = page->beg_addr_p + page->micro_words_num;

		qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
				  page->beg_addr_p, &fill_data[0]);
		qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
				  ustore_size - patt_pos + 1,
				  &fill_data[page->beg_addr_p]);
	}
	kfree(fill_data);
	return 0;
}

static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
	int i, ae;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;

	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
		if (initmem->num_in_bytes) {
			if (qat_uclo_init_ae_memory(handle, initmem))
				return -EINVAL;
		}
		initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
			(uintptr_t)initmem +
			sizeof(struct icp_qat_uof_initmem)) +
			(sizeof(struct icp_qat_uof_memvar_attr) *
			initmem->val_attr_num));
	}
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (qat_hal_batch_wr_lm(handle, ae,
					obj_handle->lm_init_tab[ae])) {
			pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
			return -EINVAL;
		}
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->lm_init_tab[ae]);
		qat_uclo_batch_wr_umem(handle, ae,
				       obj_handle->umem_init_tab[ae]);
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->
						 umem_init_tab[ae]);
	}
	return 0;
}

static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
				 char *chunk_id, void *cur)
{
	int i;
	struct icp_qat_uof_chunkhdr *chunk_hdr =
		(struct icp_qat_uof_chunkhdr *)
		((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));

	for (i = 0; i < obj_hdr->num_chunks; i++) {
		if ((cur < (void *)&chunk_hdr[i]) &&
		    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			return &chunk_hdr[i];
		}
	}
	return NULL;
}

static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	int i;
	unsigned int topbit = 1 << 0xF;
	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);

	reg ^= inbyte << 0x8;
	for (i = 0; i < 0x8; i++) {
		if (reg & topbit)
			reg = (reg << 1) ^ 0x1021;
		else
			reg <<= 1;
	}
	return reg & 0xffff;
}

static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	unsigned int chksum = 0;

	if (ptr)
		while (num--)
			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
	return chksum;
}

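/*
 * Find the file chunk named @chunk_id in the UOF file header, verify its
 * checksum and return a newly allocated icp_qat_uclo_objhdr pointing into
 * the caller's buffer; only the small header is allocated, the chunk data
 * itself is not copied.
 */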
static struct icp_qat_uclo_objhdr *
qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
		   char *chunk_id)
{
	struct icp_qat_uof_filechunkhdr *file_chunk;
	struct icp_qat_uclo_objhdr *obj_hdr;
	char *chunk;
	int i;

	file_chunk = (struct icp_qat_uof_filechunkhdr *)
		(buf + sizeof(struct icp_qat_uof_filehdr));
	for (i = 0; i < file_hdr->num_chunks; i++) {
		if (!strncmp(file_chunk->chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			chunk = buf + file_chunk->offset;
			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
				chunk, file_chunk->size))
				break;
			obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
			if (!obj_hdr)
				break;
			obj_hdr->file_buff = chunk;
			obj_hdr->checksum = file_chunk->checksum;
			obj_hdr->size = file_chunk->size;
			return obj_hdr;
		}
		file_chunk++;
	}
	return NULL;
}

static int
qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
			    struct icp_qat_uof_image *image)
{
	struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
	struct icp_qat_uof_objtable *neigh_reg_tab;
	struct icp_qat_uof_code_page *code_page;

	code_page = (struct icp_qat_uof_code_page *)
			((char *)image + sizeof(struct icp_qat_uof_image));
	uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		     code_page->uc_var_tab_offset);
	imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		      code_page->imp_var_tab_offset);
	imp_expr_tab = (struct icp_qat_uof_objtable *)
		       (encap_uof_obj->beg_uof +
		       code_page->imp_expr_tab_offset);
	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
	    imp_expr_tab->entry_num) {
		pr_err("QAT: UOF can't contain imported variable to be parsed\n");
		return -EINVAL;
	}
	neigh_reg_tab = (struct icp_qat_uof_objtable *)
			(encap_uof_obj->beg_uof +
			code_page->neigh_reg_tab_offset);
	if (neigh_reg_tab->entry_num) {
		pr_err("QAT: UOF can't contain shared control store feature\n");
		return -EINVAL;
	}
	if (image->numpages > 1) {
		pr_err("QAT: UOF can't contain multiple pages\n");
		return -EINVAL;
	}
	if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
		pr_err("QAT: UOF can't use shared control store feature\n");
		return -EFAULT;
	}
	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
		pr_err("QAT: UOF can't use reloadable feature\n");
		return -EFAULT;
	}
	return 0;
}

static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
				     *encap_uof_obj,
				     struct icp_qat_uof_image *img,
				     struct icp_qat_uclo_encap_page *page)
{
	struct icp_qat_uof_code_page *code_page;
	struct icp_qat_uof_code_area *code_area;
	struct icp_qat_uof_objtable *uword_block_tab;
	struct icp_qat_uof_uword_block *uwblock;
	int i;

	code_page = (struct icp_qat_uof_code_page *)
			((char *)img + sizeof(struct icp_qat_uof_image));
	page->def_page = code_page->def_page;
	page->page_region = code_page->page_region;
	page->beg_addr_v = code_page->beg_addr_v;
	page->beg_addr_p = code_page->beg_addr_p;
	code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
						code_page->code_area_offset);
	page->micro_words_num = code_area->micro_words_num;
	uword_block_tab = (struct icp_qat_uof_objtable *)
			  (encap_uof_obj->beg_uof +
			  code_area->uword_block_tab);
	page->uwblock_num = uword_block_tab->entry_num;
	uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
			sizeof(struct icp_qat_uof_objtable));
	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
	for (i = 0; i < uword_block_tab->entry_num; i++)
		page->uwblock[i].micro_words =
		(uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}

static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
			       struct icp_qat_uclo_encapme *ae_uimage,
			       int max_image)
{
	int i, j;
	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
	struct icp_qat_uof_image *image;
	struct icp_qat_uof_objtable *ae_regtab;
	struct icp_qat_uof_objtable *init_reg_sym_tab;
	struct icp_qat_uof_objtable *sbreak_tab;
	struct icp_qat_uof_encap_obj *encap_uof_obj =
					&obj_handle->encap_uof_obj;

	for (j = 0; j < max_image; j++) {
		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
						ICP_QAT_UOF_IMAG, chunk_hdr);
		if (!chunk_hdr)
			break;
		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
						     chunk_hdr->offset);
		ae_regtab = (struct icp_qat_uof_objtable *)
			   (image->reg_tab_offset +
			   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
		ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
			(((char *)ae_regtab) +
			sizeof(struct icp_qat_uof_objtable));
		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
				   (image->init_reg_sym_tab +
				   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
		ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
			(((char *)init_reg_sym_tab) +
			sizeof(struct icp_qat_uof_objtable));
		sbreak_tab = (struct icp_qat_uof_objtable *)
			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
		ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
				      (((char *)sbreak_tab) +
				      sizeof(struct icp_qat_uof_objtable));
		ae_uimage[j].img_ptr = image;
		if (qat_uclo_check_image_compat(encap_uof_obj, image))
			goto out_err;
		ae_uimage[j].page =
			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
				GFP_KERNEL);
		if (!ae_uimage[j].page)
			goto out_err;
		qat_uclo_map_image_page(encap_uof_obj, image,
					ae_uimage[j].page);
	}
	return j;
out_err:
	for (i = 0; i < j; i++)
		kfree(ae_uimage[i].page);
	return 0;
}

static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
{
	int i, ae;
	int mflag = 0;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	for (ae = 0; ae < max_ae; ae++) {
		if (!test_bit(ae,
			      (unsigned long *)&handle->hal_handle->ae_mask))
			continue;
		for (i = 0; i < obj_handle->uimage_num; i++) {
			if (!test_bit(ae, (unsigned long *)
			    &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
				continue;
			mflag = 1;
			if (qat_uclo_init_ae_data(obj_handle, ae, i))
				return -EINVAL;
		}
	}
	if (!mflag) {
		pr_err("QAT: uimage uses AE not set\n");
		return -EINVAL;
	}
	return 0;
}

static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
		       char *tab_name, struct icp_qat_uof_strtable *str_table)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
					obj_hdr->file_buff, tab_name, NULL);
	if (chunk_hdr) {
		int hdr_size;

		memcpy(&str_table->table_len, obj_hdr->file_buff +
		       chunk_hdr->offset, sizeof(str_table->table_len));
		hdr_size = (char *)&str_table->strings - (char *)str_table;
		str_table->strings = (uintptr_t)obj_hdr->file_buff +
					chunk_hdr->offset + hdr_size;
		return str_table;
	}
	return NULL;
}

static void
qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
			   struct icp_qat_uclo_init_mem_table *init_mem_tab)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
					ICP_QAT_UOF_IMEM, NULL);
	if (chunk_hdr) {
		memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
			chunk_hdr->offset, sizeof(unsigned int));
		init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
		(encap_uof_obj->beg_uof + chunk_hdr->offset +
		sizeof(unsigned int));
	}
}

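/*
 * Map the accelerator's PCI device id onto the UOF/SUOF ac_dev_type
 * value so that object files built for a different device generation
 * (DH895xCC, C62x, C3xxx) are rejected by the compatibility checks.
 */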
static unsigned int
qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
{
	switch (handle->pci_dev->device) {
	case ADF_DH895XCC_PCI_DEVICE_ID:
		return ICP_QAT_AC_895XCC_DEV_TYPE;
	case ADF_C62X_PCI_DEVICE_ID:
		return ICP_QAT_AC_C62X_DEV_TYPE;
	case ADF_C3XXX_PCI_DEVICE_ID:
		return ICP_QAT_AC_C3XXX_DEV_TYPE;
	default:
		pr_err("QAT: unsupported device 0x%x\n",
		       handle->pci_dev->device);
		return 0;
	}
}

static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
{
	unsigned int maj_ver, prod_type = obj_handle->prod_type;

	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
		pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
		       obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
		       prod_type);
		return -EINVAL;
	}
	maj_ver = obj_handle->prod_rev & 0xff;
	if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
	    (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
		pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned char ctx_mask,
			     enum icp_qat_uof_regtype reg_type,
			     unsigned short reg_addr, unsigned int value)
{
	switch (reg_type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		ctx_mask = 0;
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
					reg_addr, value);
	case ICP_SR_ABS:
	case ICP_DR_ABS:
	case ICP_SR_RD_ABS:
	case ICP_DR_RD_ABS:
		ctx_mask = 0;
	case ICP_SR_REL:
	case ICP_DR_REL:
	case ICP_SR_RD_REL:
	case ICP_DR_RD_REL:
		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_SR_WR_ABS:
	case ICP_DR_WR_ABS:
		ctx_mask = 0;
	case ICP_SR_WR_REL:
	case ICP_DR_WR_REL:
		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_NEIGH_REL:
		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
	default:
		pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
		return -EFAULT;
	}
	return 0;
}

static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
				 unsigned char ae,
				 struct icp_qat_uclo_encapme *encap_ae)
{
	unsigned int i;
	unsigned char ctx_mask;
	struct icp_qat_uof_init_regsym *init_regsym;

	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
	    ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;

	for (i = 0; i < encap_ae->init_regsym_num; i++) {
		unsigned int exp_res;

		init_regsym = &encap_ae->init_regsym[i];
		exp_res = init_regsym->value;
		switch (init_regsym->init_type) {
		case ICP_QAT_UOF_INIT_REG:
			qat_uclo_init_reg(handle, ae, ctx_mask,
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_REG_CTX:
			/* check if ctx is appropriate for the ctxMode */
			if (!((1 << init_regsym->ctx) & ctx_mask)) {
				pr_err("QAT: invalid ctx num = 0x%x\n",
				       init_regsym->ctx);
				return -EINVAL;
			}
			qat_uclo_init_reg(handle, ae,
					  (unsigned char)
					  (1 << init_regsym->ctx),
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_EXPR:
			pr_err("QAT: INIT_EXPR feature not supported\n");
			return -EINVAL;
		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
			return -EINVAL;
		default:
			break;
		}
	}
	return 0;
}

static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int s, ae;

	if (obj_handle->global_inited)
		return 0;
	if (obj_handle->init_mem_tab.entry_num) {
		if (qat_uclo_init_memory(handle)) {
			pr_err("QAT: initialize memory failed\n");
			return -EINVAL;
		}
	}
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
				continue;
			if (qat_uclo_init_reg_sym(handle, ae,
						  obj_handle->ae_data[ae].
						  ae_slices[s].encap_image))
				return -EINVAL;
		}
	}
	obj_handle->global_inited = 1;
	return 0;
}

static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
{
	unsigned char ae, nn_mode, s;
	struct icp_qat_uof_image *uof_image;
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae,
			      (unsigned long *)&handle->hal_handle->ae_mask))
			continue;
		ae_data = &obj_handle->ae_data[ae];
		for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
				      ICP_QAT_UCLO_MAX_CTX); s++) {
			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
				continue;
			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
			if (qat_hal_set_ae_ctx_mode(handle, ae,
						    (char)ICP_QAT_CTX_MODE
						    (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
				return -EFAULT;
			}
			nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
			if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
				pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
				return -EFAULT;
			}
			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
						   (char)ICP_QAT_LOC_MEM0_MODE
						   (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
				return -EFAULT;
			}
			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
						   (char)ICP_QAT_LOC_MEM1_MODE
						   (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
				return -EFAULT;
			}
		}
	}
	return 0;
}

static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uclo_encapme *image;
	int a;

	for (a = 0; a < obj_handle->uimage_num; a++) {
		image = &obj_handle->ae_uimage[a];
		image->uwords_num = image->page->beg_addr_p +
					image->page->micro_words_num;
	}
}

static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
	obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
					     obj_handle->obj_hdr->file_buff;
	obj_handle->uword_in_bytes = 6;
	obj_handle->prod_type = qat_uclo_get_dev_type(handle);
	obj_handle->prod_rev = PID_MAJOR_REV |
			(PID_MINOR_REV & handle->hal_handle->revision_id);
	if (qat_uclo_check_uof_compat(obj_handle)) {
		pr_err("QAT: UOF incompatible\n");
		return -EINVAL;
	}
	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
					GFP_KERNEL);
	if (!obj_handle->uword_buf)
		return -ENOMEM;
	obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
	if (!obj_handle->obj_hdr->file_buff ||
	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
				    &obj_handle->str_table)) {
		pr_err("QAT: UOF doesn't have effective images\n");
		goto out_err;
	}
	obj_handle->uimage_num =
		qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
				    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
	if (!obj_handle->uimage_num)
		goto out_err;
	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
		pr_err("QAT: Bad object\n");
		goto out_check_uof_aemask_err;
	}
	qat_uclo_init_uword_num(handle);
	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
				   &obj_handle->init_mem_tab);
	if (qat_uclo_set_ae_mode(handle))
		goto out_check_uof_aemask_err;
	return 0;
out_check_uof_aemask_err:
	for (ae = 0; ae < obj_handle->uimage_num; ae++)
		kfree(obj_handle->ae_uimage[ae].page);
out_err:
	kfree(obj_handle->uword_buf);
	return -EFAULT;
}

static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
				      struct icp_qat_suof_filehdr *suof_ptr,
				      int suof_size)
{
	unsigned int check_sum = 0;
	unsigned int min_ver_offset = 0;
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;

	suof_handle->file_id = ICP_QAT_SUOF_FID;
	suof_handle->suof_buf = (char *)suof_ptr;
	suof_handle->suof_size = suof_size;
	min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
					      min_ver);
	check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
					       min_ver_offset);
	if (check_sum != suof_ptr->check_sum) {
		pr_err("QAT: incorrect SUOF checksum\n");
		return -EINVAL;
	}
	suof_handle->check_sum = suof_ptr->check_sum;
	suof_handle->min_ver = suof_ptr->min_ver;
	suof_handle->maj_ver = suof_ptr->maj_ver;
	suof_handle->fw_type = suof_ptr->fw_type;
	return 0;
}

static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle,
			      struct icp_qat_suof_img_hdr *suof_img_hdr,
			      struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
	struct icp_qat_simg_ae_mode *ae_mode;
	struct icp_qat_suof_objhdr *suof_objhdr;

	suof_img_hdr->simg_buf = (suof_handle->suof_buf +
				  suof_chunk_hdr->offset +
				  sizeof(*suof_objhdr));
	suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
				  (suof_handle->suof_buf +
				   suof_chunk_hdr->offset))->img_length;

	suof_img_hdr->css_header = suof_img_hdr->simg_buf;
	suof_img_hdr->css_key = (suof_img_hdr->css_header +
				 sizeof(struct icp_qat_css_hdr));
	suof_img_hdr->css_signature = suof_img_hdr->css_key +
				      ICP_QAT_CSS_FWSK_MODULUS_LEN +
				      ICP_QAT_CSS_FWSK_EXPONENT_LEN;
	suof_img_hdr->css_simg = suof_img_hdr->css_signature +
				 ICP_QAT_CSS_SIGNATURE_LEN;

	ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
	suof_img_hdr->ae_mask = ae_mode->ae_mask;
	suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
	suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
	suof_img_hdr->fw_type = ae_mode->fw_type;
}

static void
qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
			  struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
	char **sym_str = (char **)&suof_handle->sym_str;
	unsigned int *sym_size = &suof_handle->sym_size;
	struct icp_qat_suof_strtable *str_table_obj;

	*sym_size = *(unsigned int *)(uintptr_t)
		   (suof_chunk_hdr->offset + suof_handle->suof_buf);
	*sym_str = (char *)(uintptr_t)
		   (suof_handle->suof_buf + suof_chunk_hdr->offset +
		   sizeof(str_table_obj->tab_length));
}

static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
				      struct icp_qat_suof_img_hdr *img_hdr)
{
	struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
	unsigned int prod_rev, maj_ver, prod_type;

	prod_type = qat_uclo_get_dev_type(handle);
	img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
	prod_rev = PID_MAJOR_REV |
			(PID_MINOR_REV & handle->hal_handle->revision_id);
	if (img_ae_mode->dev_type != prod_type) {
		pr_err("QAT: incompatible product type %x\n",
		       img_ae_mode->dev_type);
		return -EINVAL;
	}
	maj_ver = prod_rev & 0xff;
	if ((maj_ver > img_ae_mode->devmax_ver) ||
	    (maj_ver < img_ae_mode->devmin_ver)) {
		pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
		return -EINVAL;
	}
	return 0;
}

static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;

	kfree(sobj_handle->img_table.simg_hdr);
	sobj_handle->img_table.simg_hdr = NULL;
	kfree(handle->sobj_handle);
	handle->sobj_handle = NULL;
}

static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
			      unsigned int img_id, unsigned int num_simgs)
{
	struct icp_qat_suof_img_hdr img_header;

	if (img_id != num_simgs - 1) {
		memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
		       sizeof(*suof_img_hdr));
		memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
		       sizeof(*suof_img_hdr));
		memcpy(&suof_img_hdr[img_id], &img_header,
		       sizeof(*suof_img_hdr));
	}
}

static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
			     struct icp_qat_suof_filehdr *suof_ptr,
			     int suof_size)
{
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
	struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
	struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
	int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
	unsigned int i = 0;
	struct icp_qat_suof_img_hdr img_header;

	if (!suof_ptr || (suof_size == 0)) {
		pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
		return -EINVAL;
	}
	if (qat_uclo_check_suof_format(suof_ptr))
		return -EINVAL;
	ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
	if (ret)
		return ret;
	suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
			 ((uintptr_t)suof_ptr + sizeof(*suof_ptr));

	qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
	suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;

	if (suof_handle->img_table.num_simgs != 0) {
		suof_img_hdr = kzalloc(suof_handle->img_table.num_simgs *
				       sizeof(img_header), GFP_KERNEL);
		if (!suof_img_hdr)
			return -ENOMEM;
		suof_handle->img_table.simg_hdr = suof_img_hdr;
	}

	for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
		qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i],
				  &suof_chunk_hdr[1 + i]);
		ret = qat_uclo_check_simg_compat(handle,
						 &suof_img_hdr[i]);
		if (ret)
			return ret;
		if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
			ae0_img = i;
	}
	qat_uclo_tail_img(suof_img_hdr, ae0_img,
			  suof_handle->img_table.num_simgs);
	return 0;
}

#define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + low)
#define BITS_IN_DWORD 32

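/*
 * Ask the FCU to authenticate a previously mapped image: program the bus
 * address of the auth descriptor into the FCU DRAM address CSRs, issue
 * the AUTH command and poll FCU_STATUS (FW_AUTH_WAIT_PERIOD ms per
 * iteration, at most FW_AUTH_MAX_RETRY iterations) until the hardware
 * reports verification done or verification failed.
 */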
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	unsigned int fcu_sts, retry = 0;
	u64 bus_addr;

	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
			   - sizeof(struct icp_qat_auth_chunk);
	SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD));
	SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr);
	SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH);

	do {
		msleep(FW_AUTH_WAIT_PERIOD);
		fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
		if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
			goto auth_fail;
		if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
			if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
				return 0;
	} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
	pr_err("QAT: authentication error (FCU_STATUS = 0x%x), retry = %d\n",
	       fcu_sts & FCU_AUTH_STS_MASK, retry);
	return -EINVAL;
}

static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
			       struct icp_firml_dram_desc *dram_desc,
			       unsigned int size)
{
	void *vptr;
	dma_addr_t ptr;

	vptr = dma_alloc_coherent(&handle->pci_dev->dev,
				  size, &ptr, GFP_KERNEL);
	if (!vptr)
		return -ENOMEM;
	dram_desc->dram_base_addr_v = vptr;
	dram_desc->dram_bus_addr = ptr;
	dram_desc->dram_size = size;
	return 0;
}

static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
			       struct icp_firml_dram_desc *dram_desc)
{
	dma_free_coherent(&handle->pci_dev->dev,
			  (size_t)(dram_desc->dram_size),
			  (dram_desc->dram_base_addr_v),
			  dram_desc->dram_bus_addr);
	memset(dram_desc, 0, sizeof(*dram_desc));
}

static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_fw_auth_desc **desc)
{
	struct icp_firml_dram_desc dram_desc;

	dram_desc.dram_base_addr_v = *desc;
	dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
				   (*desc))->chunk_bus_addr;
	dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
			       (*desc))->chunk_size;
	qat_uclo_simg_free(handle, &dram_desc);
}

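/*
 * Build the DMA-able blob that the FCU authenticates: one coherent
 * allocation holding an icp_qat_auth_chunk bookkeeping header followed by
 * the CSS header, the FWSK public key (modulus, padding, exponent), the
 * signature and the firmware image itself.  The bus address of each
 * section is recorded as a high/low pair in the returned
 * icp_qat_fw_auth_desc.
 */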
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
				char *image, unsigned int size,
				struct icp_qat_fw_auth_desc **desc)
{
	struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
	struct icp_qat_fw_auth_desc *auth_desc;
	struct icp_qat_auth_chunk *auth_chunk;
	u64 virt_addr, bus_addr, virt_base;
	unsigned int length, simg_offset = sizeof(*auth_chunk);
	struct icp_firml_dram_desc img_desc;

	if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
		pr_err("QAT: error, input image size overflow %d\n", size);
		return -EINVAL;
	}
	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
		 ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
		 size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
	if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
		pr_err("QAT: error, allocate continuous dram fail\n");
		return -ENOMEM;
	}

	auth_chunk = img_desc.dram_base_addr_v;
	auth_chunk->chunk_size = img_desc.dram_size;
	auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
	virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
	bus_addr = img_desc.dram_bus_addr + simg_offset;
	auth_desc = img_desc.dram_base_addr_v;
	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->css_hdr_low = (unsigned int)bus_addr;
	virt_addr = virt_base;

	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
	/* pub key */
	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
		   sizeof(*css_hdr);
	virt_addr = virt_addr + sizeof(*css_hdr);

	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr)),
	       ICP_QAT_CSS_FWSK_MODULUS_LEN);
	/* padding */
	memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN),
	       0, ICP_QAT_CSS_FWSK_PAD_LEN);

	/* exponent */
	memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
	       ICP_QAT_CSS_FWSK_PAD_LEN),
	       (void *)(image + sizeof(*css_hdr) +
			ICP_QAT_CSS_FWSK_MODULUS_LEN),
	       sizeof(unsigned int));

	/* signature */
	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
			    auth_desc->fwsk_pub_low) +
		   ICP_QAT_CSS_FWSK_PUB_LEN;
	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->signature_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr) +
	       ICP_QAT_CSS_FWSK_MODULUS_LEN +
	       ICP_QAT_CSS_FWSK_EXPONENT_LEN),
	       ICP_QAT_CSS_SIGNATURE_LEN);

	bus_addr = ADD_ADDR(auth_desc->signature_high,
			    auth_desc->signature_low) +
		   ICP_QAT_CSS_SIGNATURE_LEN;
	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;

	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->img_low = (unsigned int)bus_addr;
	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + ICP_QAT_AE_IMG_OFFSET),
	       auth_desc->img_len);
	virt_addr = virt_base;
	/* AE firmware */
	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
	    CSS_AE_FIRMWARE) {
		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
				    auth_desc->img_ae_mode_data_low) +
			   sizeof(struct icp_qat_simg_ae_mode);

		auth_desc->img_ae_init_data_high = (unsigned int)
						   (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
		auth_desc->img_ae_insts_high = (unsigned int)
					       (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
	} else {
		auth_desc->img_ae_insts_high = auth_desc->img_high;
		auth_desc->img_ae_insts_low = auth_desc->img_low;
	}
	*desc = auth_desc;
	return 0;
}

static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	unsigned int i;
	unsigned int fcu_sts;
	struct icp_qat_simg_ae_mode *virt_addr;
	unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS;

	virt_addr = (void *)((uintptr_t)desc +
		     sizeof(struct icp_qat_auth_chunk) +
		     sizeof(struct icp_qat_css_hdr) +
		     ICP_QAT_CSS_FWSK_PUB_LEN +
		     ICP_QAT_CSS_SIGNATURE_LEN);
	for (i = 0; i < handle->hal_handle->ae_max_num; i++) {
		int retry = 0;

		if (!((virt_addr->ae_mask >> i) & 0x1))
			continue;
		if (qat_hal_check_ae_active(handle, i)) {
			pr_err("QAT: AE %d is active\n", i);
			return -EINVAL;
		}
		SET_CAP_CSR(handle, FCU_CONTROL,
			    (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));

		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
			if (((fcu_sts & FCU_AUTH_STS_MASK) ==
			    FCU_STS_LOAD_DONE) &&
			    ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i)))
				break;
		} while (retry++ < FW_AUTH_MAX_RETRY);
		if (retry > FW_AUTH_MAX_RETRY) {
			pr_err("QAT: firmware load failed timeout %x\n", retry);
			return -EINVAL;
		}
	}
	return 0;
}

static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
				 void *addr_ptr, int mem_size)
{
	struct icp_qat_suof_handle *suof_handle;

	suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
	if (!suof_handle)
		return -ENOMEM;
	handle->sobj_handle = suof_handle;
	if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
		qat_uclo_del_suof(handle);
		pr_err("QAT: map SUOF failed\n");
		return -EINVAL;
	}
	return 0;
}

int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
		       void *addr_ptr, int mem_size)
{
	struct icp_qat_fw_auth_desc *desc = NULL;
	int status = 0;

	if (handle->fw_auth) {
		if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc))
			status = qat_uclo_auth_fw(handle, desc);
		qat_uclo_ummap_auth_fw(handle, &desc);
	} else {
		if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
			pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
			return -EINVAL;
		}
		qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
	}
	return status;
}

static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
				void *addr_ptr, int mem_size)
{
	struct icp_qat_uof_filehdr *filehdr;
	struct icp_qat_uclo_objhandle *objhdl;

	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
	if (!objhdl)
		return -ENOMEM;
	objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
	if (!objhdl->obj_buf)
		goto out_objbuf_err;
	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
	if (qat_uclo_check_uof_format(filehdr))
		goto out_objhdr_err;
	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
					     ICP_QAT_UOF_OBJS);
	if (!objhdl->obj_hdr) {
		pr_err("QAT: object file chunk is null\n");
		goto out_objhdr_err;
	}
	handle->obj_handle = objhdl;
	if (qat_uclo_parse_uof_obj(handle))
		goto out_overlay_obj_err;
	return 0;

out_overlay_obj_err:
	handle->obj_handle = NULL;
	kfree(objhdl->obj_hdr);
out_objhdr_err:
	kfree(objhdl->obj_buf);
out_objbuf_err:
	kfree(objhdl);
	return -ENOMEM;
}

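/*
 * Top-level entry point for loading a microcode object: devices with
 * firmware authentication enabled (fw_auth) take the signed SUOF path
 * handled by the FCU, all other devices take the plain UOF path parsed
 * and written to the AEs directly by the driver.
 */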
int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
		     void *addr_ptr, int mem_size)
{
	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
		     (sizeof(handle->hal_handle->ae_mask) * 8));

	if (!handle || !addr_ptr || mem_size < 24)
		return -EINVAL;

	return (handle->fw_auth) ?
			qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) :
			qat_uclo_map_uof_obj(handle, addr_ptr, mem_size);
}

void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int a;

	if (handle->sobj_handle)
		qat_uclo_del_suof(handle);
	if (!obj_handle)
		return;

	kfree(obj_handle->uword_buf);
	for (a = 0; a < obj_handle->uimage_num; a++)
		kfree(obj_handle->ae_uimage[a].page);

	for (a = 0; a < handle->hal_handle->ae_max_num; a++)
		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);

	kfree(obj_handle->obj_hdr);
	kfree(obj_handle->obj_buf);
	kfree(obj_handle);
	handle->obj_handle = NULL;
}

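/*
 * Resolve a single micro word: if the relative address @raddr falls
 * inside one of the page's uword blocks the word is copied out of the
 * UOF image; words that come back as INVLD_UWORD (or a missing
 * encap_page) are replaced with the caller's @fill pattern.
 */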
static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
				 struct icp_qat_uclo_encap_page *encap_page,
				 uint64_t *uword, unsigned int addr_p,
				 unsigned int raddr, uint64_t fill)
{
	uint64_t uwrd = 0;
	unsigned int i;

	if (!encap_page) {
		*uword = fill;
		return;
	}
	for (i = 0; i < encap_page->uwblock_num; i++) {
		if (raddr >= encap_page->uwblock[i].start_addr &&
		    raddr <= encap_page->uwblock[i].start_addr +
		    encap_page->uwblock[i].words_num - 1) {
			raddr -= encap_page->uwblock[i].start_addr;
			raddr *= obj_handle->uword_in_bytes;
			memcpy(&uwrd, (void *)(((uintptr_t)
			       encap_page->uwblock[i].micro_words) + raddr),
			       obj_handle->uword_in_bytes);
			uwrd = uwrd & 0xbffffffffffull;
		}
	}
	*uword = uwrd;
	if (*uword == INVLD_UWORD)
		*uword = fill;
}

static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
					struct icp_qat_uclo_encap_page
					*encap_page, unsigned int ae)
{
	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t fill_pat;

	/* load the page starting at appropriate ustore address */
	/* get fill-pattern from an image -- they are all the same */
	memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
	       sizeof(uint64_t));
	uw_physical_addr = encap_page->beg_addr_p;
	uw_relative_addr = 0;
	words_num = encap_page->micro_words_num;
	while (words_num) {
		if (words_num < UWORD_CPYBUF_SIZE)
			cpylen = words_num;
		else
			cpylen = UWORD_CPYBUF_SIZE;

		/* load the buffer */
		for (i = 0; i < cpylen; i++)
			qat_uclo_fill_uwords(obj_handle, encap_page,
					     &obj_handle->uword_buf[i],
					     uw_physical_addr + i,
					     uw_relative_addr + i, fill_pat);

		/* copy the buffer to ustore */
		qat_hal_wr_uwords(handle, (unsigned char)ae,
				  uw_physical_addr, cpylen,
				  obj_handle->uword_buf);

		uw_physical_addr += cpylen;
		uw_relative_addr += cpylen;
		words_num -= cpylen;
	}
}

static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
				    struct icp_qat_uof_image *image)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ctx_mask, s;
	struct icp_qat_uclo_page *page;
	unsigned char ae;
	int ctx;

	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;
	/* load the default page and set assigned CTX PC
	 * to the entrypoint address */
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
			continue;
		/* find the slice to which this image is assigned */
		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
			if (image->ctx_assigned & obj_handle->ae_data[ae].
			    ae_slices[s].ctx_mask_assigned)
				break;
		}
		if (s >= obj_handle->ae_data[ae].slice_num)
			continue;
		page = obj_handle->ae_data[ae].ae_slices[s].page;
		if (!page->encap_page->def_page)
			continue;
		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);

		page = obj_handle->ae_data[ae].ae_slices[s].page;
		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
			obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
					(ctx_mask & (1 << ctx)) ? page : NULL;
		qat_hal_set_live_ctx(handle, (unsigned char)ae,
				     image->ctx_assigned);
		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
			       image->entry_address);
	}
}

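/*
 * Signed (SUOF) download path: map each SIMG from the image table into a
 * DMA-able auth descriptor, have the FCU authenticate it, load it into
 * its AEs and then release the descriptor, one image at a time.
 */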
static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int i;
	struct icp_qat_fw_auth_desc *desc = NULL;
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;

	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
		if (qat_uclo_map_auth_fw(handle,
					 (char *)simg_hdr[i].simg_buf,
					 (unsigned int)
					 (simg_hdr[i].simg_len),
					 &desc))
			goto wr_err;
		if (qat_uclo_auth_fw(handle, desc))
			goto wr_err;
		if (qat_uclo_load_fw(handle, desc))
			goto wr_err;
		qat_uclo_ummap_auth_fw(handle, &desc);
	}
	return 0;
wr_err:
	qat_uclo_ummap_auth_fw(handle, &desc);
	return -EINVAL;
}

static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int i;

	if (qat_uclo_init_globals(handle))
		return -EINVAL;
	for (i = 0; i < obj_handle->uimage_num; i++) {
		if (!obj_handle->ae_uimage[i].img_ptr)
			return -EINVAL;
		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
			return -EINVAL;
		qat_uclo_wr_uimage_page(handle,
					obj_handle->ae_uimage[i].img_ptr);
	}
	return 0;
}

int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
{
	return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
				   qat_uclo_wr_uof_img(handle);
}