2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/slab.h>
48 #include <linux/ctype.h>
49 #include <linux/kernel.h>
51 #include "adf_accel_devices.h"
52 #include "adf_common_drv.h"
53 #include "icp_qat_uclo.h"
54 #include "icp_qat_hal.h"
55 #include "icp_qat_fw_loader_handle.h"
/* Number of 64-bit microwords in the bounce buffer used when streaming a
 * code page into an AE's ustore (see qat_uclo_wr_uimage_raw_page). */
57 #define UWORD_CPYBUF_SIZE 1024
/* Sentinel value marking a microword slot not covered by any uword block
 * (compared against in qat_uclo_fill_uwords). */
58 #define INVLD_UWORD 0xffffffffffull
/* Bit-field masks used to compose prod_rev from the HAL revision_id
 * (see qat_uclo_parse_uof_obj). */
59 #define PID_MINOR_REV 0xf
60 #define PID_MAJOR_REV (0xf << 4)
/* Bind UOF image @image_num to the next free slice of accel engine @ae:
 * record the image's assigned-context mask and allocate the slice's
 * region and page bookkeeping structures.
 * NOTE(review): this extract is missing interior lines (braces, returns,
 * error paths) throughout the file; comments describe only visible code. */
62 static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
63 unsigned int ae, unsigned int image_num)
65 struct icp_qat_uclo_aedata *ae_data;
66 struct icp_qat_uclo_encapme *encap_image;
67 struct icp_qat_uclo_page *page = NULL;
68 struct icp_qat_uclo_aeslice *ae_slice = NULL;
70 ae_data = &obj_handle->ae_data[ae];
71 encap_image = &obj_handle->ae_uimage[image_num];
/* Use the next unoccupied slice slot for this AE. */
72 ae_slice = &ae_data->ae_slices[ae_data->slice_num];
73 ae_slice->encap_image = encap_image;
/* With a real image attached, take its context mask and give the AE the
 * full physical ustore size; otherwise the mask is cleared. */
75 if (encap_image->img_ptr) {
76 ae_slice->ctx_mask_assigned =
77 encap_image->img_ptr->ctx_assigned;
78 ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
80 ae_slice->ctx_mask_assigned = 0;
82 ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
83 if (!ae_slice->region)
85 ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
88 page = ae_slice->page;
89 page->encap_page = encap_image->page;
90 ae_slice->page->region = ae_slice->region;
/* Error unwind: release the region if page allocation failed. */
94 kfree(ae_slice->region);
95 ae_slice->region = NULL;
/* Release the per-slice region/page allocations of one AE's data,
 * NULLing the pointers to guard against double-free. */
99 static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
104 pr_err("QAT: bad argument, ae_data is NULL\n ");
108 for (i = 0; i < ae_data->slice_num; i++) {
109 kfree(ae_data->ae_slices[i].region);
110 ae_data->ae_slices[i].region = NULL;
111 kfree(ae_data->ae_slices[i].page);
112 ae_data->ae_slices[i].page = NULL;
/* Return a pointer into the UOF string table at @str_offset, or fail the
 * bounds check when the table is empty or the offset is past its end.
 * NOTE(review): the failure-path return is among the truncated lines. */
117 static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
118 unsigned int str_offset)
120 if ((!str_table->table_len) || (str_offset > str_table->table_len))
122 return (char *)(((unsigned long)(str_table->strings)) + str_offset);
/* Validate the UOF file header: file id must match ICP_QAT_UOF_FID and
 * the major/minor version must equal the expected UOF version. */
125 static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
127 int maj = hdr->maj_ver & 0xff;
128 int min = hdr->min_ver & 0xff;
130 if (hdr->file_id != ICP_QAT_UOF_FID) {
131 pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
134 if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
135 pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
/* Copy @num_in_bytes from @val into device SRAM starting at @addr, one
 * 32-bit word per SRAM_WRITE. The memcpy avoids a possibly misaligned
 * direct dereference of @ptr.
 * NOTE(review): pointer/address advance and byte-count decrement lines
 * are missing from this extract. */
142 static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
143 unsigned int addr, unsigned int *val,
144 unsigned int num_in_bytes)
147 unsigned char *ptr = (unsigned char *)val;
149 while (num_in_bytes) {
150 memcpy(&outval, ptr, 4);
151 SRAM_WRITE(handle, addr, outval);
/* Copy @num_in_bytes from @val into AE @ae's micro-store memory, one
 * 32-bit word per qat_hal_wr_umem call. */
158 static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
159 unsigned char ae, unsigned int addr,
161 unsigned int num_in_bytes)
164 unsigned char *ptr = (unsigned char *)val;
166 addr >>= 0x2; /* convert to uword address */
168 while (num_in_bytes) {
169 memcpy(&outval, ptr, 4);
170 qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
/* Walk a batch-init list headed by @umem_init_header and apply each
 * (addr, value, size) entry to AE @ae's umem via
 * qat_uclo_wr_umem_by_words. A NULL header is a no-op. */
176 static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
178 struct icp_qat_uof_batch_init
181 struct icp_qat_uof_batch_init *umem_init;
183 if (!umem_init_header)
/* The head node is a sentinel; real entries start at ->next. */
185 umem_init = umem_init_header->next;
187 unsigned int addr, *value, size;
190 addr = umem_init->addr;
191 value = umem_init->value;
192 size = umem_init->size;
193 qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
194 umem_init = umem_init->next;
/* Free every node of the batch-init list at *@base.
 * NOTE(review): the kfree of each node and the reset of *base are among
 * the lines missing from this extract. */
199 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
200 struct icp_qat_uof_batch_init **base)
202 struct icp_qat_uof_batch_init *umem_init;
206 struct icp_qat_uof_batch_init *pre;
209 umem_init = umem_init->next;
/* Parse a decimal number from @str into *@num. The string is copied into
 * a local 16-byte buffer (at most 15 chars, leaving room for the NUL,
 * presumably zero-initialized on the truncated declaration line), scanned
 * so only leading digits are kept, then converted with kstrtoul. */
215 static int qat_uclo_parse_num(char *str, unsigned int *num)
218 unsigned long ae = 0;
221 strncpy(buf, str, 15);
222 for (i = 0; i < 16; i++) {
223 if (!isdigit(buf[i])) {
228 if ((kstrtoul(buf, 10, &ae)))
231 *num = (unsigned int)ae;
/* Validate an initmem record and extract the target AE number from its
 * symbol name: range-check addr+size against @size_range (in words,
 * hence << 2 to bytes), require local scope, look up the symbol string,
 * parse it as a number, and bound it by ICP_QAT_UCLO_MAX_AE. */
235 static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
236 struct icp_qat_uof_initmem *init_mem,
237 unsigned int size_range, unsigned int *ae)
239 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
242 if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
243 pr_err("QAT: initmem is out of range");
246 if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
247 pr_err("QAT: Memory scope for init_mem error\n");
250 str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
252 pr_err("QAT: AE name assigned in UOF init table is NULL\n");
255 if (qat_uclo_parse_num(str, ae)) {
256 pr_err("QAT: Parse num for AE number failed\n");
259 if (*ae >= ICP_QAT_UCLO_MAX_AE) {
260 pr_err("QAT: ae %d out of range\n", *ae);
/* Build (or append to) the batch-init list at *@init_tab_base from one
 * initmem record's memvar attributes: each attribute becomes a node with
 * its absolute address and value; the head node's size field accumulates
 * the instruction count (qat_hal_get_ins_num per entry). Frees the head
 * on failure only when it was allocated here. */
266 static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
267 *handle, struct icp_qat_uof_initmem
268 *init_mem, unsigned int ae,
269 struct icp_qat_uof_batch_init
272 struct icp_qat_uof_batch_init *init_header, *tail;
273 struct icp_qat_uof_batch_init *mem_init, *tail_old;
274 struct icp_qat_uof_memvar_attr *mem_val_attr;
275 unsigned int i, flag = 0;
/* Attribute array sits immediately after the initmem header. */
278 (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
279 sizeof(struct icp_qat_uof_initmem));
281 init_header = *init_tab_base;
/* First use: allocate a sentinel head and publish it. */
283 init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
286 init_header->size = 1;
287 *init_tab_base = init_header;
/* Find the current list tail to append after. */
290 tail_old = init_header;
291 while (tail_old->next)
292 tail_old = tail_old->next;
294 for (i = 0; i < init_mem->val_attr_num; i++) {
295 mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
299 mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
/* Value points into the UOF buffer rather than being copied. */
300 mem_init->value = &mem_val_attr->value;
302 mem_init->next = NULL;
303 tail->next = mem_init;
305 init_header->size += qat_hal_get_ins_num();
310 /* Do not free the list head unless we allocated it. */
311 tail_old = tail_old->next;
313 kfree(*init_tab_base);
314 *init_tab_base = NULL;
/* Error unwind: free the nodes appended during this call. */
318 mem_init = tail_old->next;
/* Handle one local-memory initmem record: resolve its AE (bounded by
 * ICP_QAT_UCLO_MAX_LMEM_REG) and queue it on that AE's lm_init_tab. */
325 static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
326 struct icp_qat_uof_initmem *init_mem)
328 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
331 if (qat_uclo_fetch_initmem_ae(handle, init_mem,
332 ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
334 if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
335 &obj_handle->lm_init_tab[ae]))
/* Handle one micro-store initmem record: resolve its AE (bounded by the
 * physical ustore size), queue it on umem_init_tab, then raise each
 * slice image's uwords_num to cover the highest ustore address touched. */
340 static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
341 struct icp_qat_uof_initmem *init_mem)
343 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
344 unsigned int ae, ustore_size, uaddr, i;
346 ustore_size = obj_handle->ustore_phy_size;
347 if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
349 if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
350 &obj_handle->umem_init_tab[ae]))
352 /* set the highest ustore address referenced */
353 uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
354 for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
355 if (obj_handle->ae_data[ae].ae_slices[i].
356 encap_image->uwords_num < uaddr)
357 obj_handle->ae_data[ae].ae_slices[i].
358 encap_image->uwords_num = uaddr;
/* Dispatch one initmem record by region type to the LMEM or UMEM
 * handler; unknown region types are reported as errors. */
363 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
364 struct icp_qat_uof_initmem *init_mem)
366 switch (init_mem->region) {
367 case ICP_QAT_UOF_LMEM_REGION:
368 if (qat_uclo_init_lmem_seg(handle, init_mem))
371 case ICP_QAT_UOF_UMEM_REGION:
372 if (qat_uclo_init_umem_seg(handle, init_mem))
376 pr_err("QAT: initmem region error. region type=0x%x\n",
/* Pre-fill the ustore of every AE assigned to @image with the image's
 * fill pattern: a full-ustore buffer of the pattern is built once, then
 * the regions before the page start and after the page end are written. */
383 static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
384 struct icp_qat_uclo_encapme *image)
387 struct icp_qat_uclo_encap_page *page;
388 struct icp_qat_uof_image *uof_image;
390 unsigned int ustore_size;
391 unsigned int patt_pos;
392 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
395 uof_image = image->img_ptr;
396 fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
/* Replicate the image's fill pattern across the whole buffer. */
400 for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
401 memcpy(&fill_data[i], &uof_image->fill_pattern,
405 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
406 if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
408 ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
409 patt_pos = page->beg_addr_p + page->micro_words_num;
/* Fill [0, beg_addr_p) and [patt_pos, end of ustore]. */
411 qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
412 page->beg_addr_p, &fill_data[0]);
413 qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
414 ustore_size - patt_pos + 1,
415 &fill_data[page->beg_addr_p]);
/* Walk the UOF initmem table building per-AE batch-init lists, then for
 * every AE apply (and free) the local-memory and umem lists. Records are
 * variable-length: each is followed by val_attr_num memvar attributes. */
421 static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
424 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
425 struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
427 for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
428 if (initmem->num_in_bytes) {
429 if (qat_uclo_init_ae_memory(handle, initmem))
/* Advance past this record and its trailing attribute array. */
432 initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
433 (unsigned long)initmem +
434 sizeof(struct icp_qat_uof_initmem)) +
435 (sizeof(struct icp_qat_uof_memvar_attr) *
436 initmem->val_attr_num));
438 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
439 if (qat_hal_batch_wr_lm(handle, ae,
440 obj_handle->lm_init_tab[ae])) {
441 pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
444 qat_uclo_cleanup_batch_init_list(handle,
445 &obj_handle->lm_init_tab[ae]);
446 qat_uclo_batch_wr_umem(handle, ae,
447 obj_handle->umem_init_tab[ae]);
448 qat_uclo_cleanup_batch_init_list(handle,
/* Find the next chunk named @chunk_id inside @obj_hdr, scanning the
 * chunk-header array that follows the object header. @cur allows
 * resuming: only chunk headers located after @cur are matched. */
455 static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
456 char *chunk_id, void *cur)
459 struct icp_qat_uof_chunkhdr *chunk_hdr =
460 (struct icp_qat_uof_chunkhdr *)
461 ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
463 for (i = 0; i < obj_hdr->num_chunks; i++) {
464 if ((cur < (void *)&chunk_hdr[i]) &&
465 !strncmp(chunk_hdr[i].chunk_id, chunk_id,
466 ICP_QAT_UOF_OBJID_LEN)) {
467 return &chunk_hdr[i];
/* Fold one byte @ch into the running CRC register @reg; the 0x1021
 * polynomial matches CRC-16/CCITT. The topbit/shift loop processes the
 * byte bit by bit (parts of the loop are truncated in this extract). */
473 static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
476 unsigned int topbit = 1 << 0xF;
477 unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
479 reg ^= inbyte << 0x8;
480 for (i = 0; i < 0x8; i++) {
482 reg = (reg << 1) ^ 0x1021;
/* Checksum @num bytes at @ptr by folding each byte through
 * qat_uclo_calc_checksum, starting from zero. */
489 static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
491 unsigned int chksum = 0;
495 chksum = qat_uclo_calc_checksum(chksum, *ptr++);
/* Locate the file chunk named @chunk_id in @buf, verify its checksum
 * against the stored value, and wrap the in-buffer chunk in a freshly
 * allocated icp_qat_uclo_objhdr (file_buff points into @buf, no copy). */
499 static struct icp_qat_uclo_objhdr *
500 qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
503 struct icp_qat_uof_filechunkhdr *file_chunk;
504 struct icp_qat_uclo_objhdr *obj_hdr;
/* File-chunk headers directly follow the file header. */
508 file_chunk = (struct icp_qat_uof_filechunkhdr *)
509 (buf + sizeof(struct icp_qat_uof_filehdr));
510 for (i = 0; i < file_hdr->num_chunks; i++) {
511 if (!strncmp(file_chunk->chunk_id, chunk_id,
512 ICP_QAT_UOF_OBJID_LEN)) {
513 chunk = buf + file_chunk->offset;
514 if (file_chunk->checksum != qat_uclo_calc_str_checksum(
515 chunk, file_chunk->size))
517 obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
520 obj_hdr->file_buff = chunk;
521 obj_hdr->checksum = file_chunk->checksum;
522 obj_hdr->size = file_chunk->size;
/* Reject UOF images using features this loader does not support:
 * imported variables/expressions, shared control store (neighbour regs
 * or shared-ustore mode), multiple pages, or reloadable contexts. */
531 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
532 struct icp_qat_uof_image *image)
534 struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
535 struct icp_qat_uof_objtable *neigh_reg_tab;
536 struct icp_qat_uof_code_page *code_page;
/* The code page immediately follows the image header. */
538 code_page = (struct icp_qat_uof_code_page *)
539 ((char *)image + sizeof(struct icp_qat_uof_image));
540 uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
541 code_page->uc_var_tab_offset);
542 imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
543 code_page->imp_var_tab_offset);
544 imp_expr_tab = (struct icp_qat_uof_objtable *)
545 (encap_uof_obj->beg_uof +
546 code_page->imp_expr_tab_offset);
547 if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
548 imp_expr_tab->entry_num) {
549 pr_err("QAT: UOF can't contain imported variable to be parsed");
552 neigh_reg_tab = (struct icp_qat_uof_objtable *)
553 (encap_uof_obj->beg_uof +
554 code_page->neigh_reg_tab_offset);
555 if (neigh_reg_tab->entry_num) {
556 pr_err("QAT: UOF can't contain shared control store feature");
559 if (image->numpages > 1) {
560 pr_err("QAT: UOF can't contain multiple pages");
563 if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
564 pr_err("QAT: UOF can't use shared control store feature");
567 if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
568 pr_err("QAT: UOF can't use reloadable feature");
/* Flatten one UOF image's code page into @page: copy the page metadata,
 * resolve the code area and uword-block table, and rewrite each uword
 * block's offset into an absolute pointer within the UOF buffer. */
574 static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
576 struct icp_qat_uof_image *img,
577 struct icp_qat_uclo_encap_page *page)
579 struct icp_qat_uof_code_page *code_page;
580 struct icp_qat_uof_code_area *code_area;
581 struct icp_qat_uof_objtable *uword_block_tab;
582 struct icp_qat_uof_uword_block *uwblock;
585 code_page = (struct icp_qat_uof_code_page *)
586 ((char *)img + sizeof(struct icp_qat_uof_image));
587 page->def_page = code_page->def_page;
588 page->page_region = code_page->page_region;
589 page->beg_addr_v = code_page->beg_addr_v;
590 page->beg_addr_p = code_page->beg_addr_p;
591 code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
592 code_page->code_area_offset);
593 page->micro_words_num = code_area->micro_words_num;
594 uword_block_tab = (struct icp_qat_uof_objtable *)
595 (encap_uof_obj->beg_uof +
596 code_area->uword_block_tab);
597 page->uwblock_num = uword_block_tab->entry_num;
598 uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
599 sizeof(struct icp_qat_uof_objtable));
600 page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
/* Convert per-block file offsets to absolute addresses. */
601 for (i = 0; i < uword_block_tab->entry_num; i++)
602 page->uwblock[i].micro_words =
603 (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
/* Enumerate up to @max_image IMAG chunks in the UOF, populating one
 * ae_uimage entry per image (register table, init-regsym table, sbreak
 * table, image pointer, mapped page). On failure the pages allocated so
 * far are freed. Returns the image count (return lines are truncated). */
606 static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
607 struct icp_qat_uclo_encapme *ae_uimage,
611 struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
612 struct icp_qat_uof_image *image;
613 struct icp_qat_uof_objtable *ae_regtab;
614 struct icp_qat_uof_objtable *init_reg_sym_tab;
615 struct icp_qat_uof_objtable *sbreak_tab;
616 struct icp_qat_uof_encap_obj *encap_uof_obj =
617 &obj_handle->encap_uof_obj;
619 for (j = 0; j < max_image; j++) {
/* Resume scanning from the previous chunk header. */
620 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
621 ICP_QAT_UOF_IMAG, chunk_hdr);
624 image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
626 ae_regtab = (struct icp_qat_uof_objtable *)
627 (image->reg_tab_offset +
628 obj_handle->obj_hdr->file_buff)
629 ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
630 ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
631 (((char *)ae_regtab) +
632 sizeof(struct icp_qat_uof_objtable));
633 init_reg_sym_tab = (struct icp_qat_uof_objtable *)
634 (image->init_reg_sym_tab +
635 obj_handle->obj_hdr->file_buff);
636 ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
637 ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
638 (((char *)init_reg_sym_tab) +
639 sizeof(struct icp_qat_uof_objtable));
640 sbreak_tab = (struct icp_qat_uof_objtable *)
641 (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
642 ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
643 ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
644 (((char *)sbreak_tab) +
645 sizeof(struct icp_qat_uof_objtable));
646 ae_uimage[j].img_ptr = image;
647 if (qat_uclo_check_image_compat(encap_uof_obj, image))
650 kzalloc(sizeof(struct icp_qat_uclo_encap_page),
652 if (!ae_uimage[j].page)
654 qat_uclo_map_image_page(encap_uof_obj, image,
/* Error unwind: free pages of already-mapped images. */
659 for (i = 0; i < j; i++)
660 kfree(ae_uimage[i].page);
/* For each enabled AE (per the HAL ae_mask), find the images whose
 * ae_assigned bit covers it and bind them via qat_uclo_init_ae_data.
 * An AE used by no image is reported as an error. */
664 static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
668 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
670 for (ae = 0; ae <= max_ae; ae++) {
672 (unsigned long *)&handle->hal_handle->ae_mask))
674 for (i = 0; i < obj_handle->uimage_num; i++) {
675 if (!test_bit(ae, (unsigned long *)
676 &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
679 if (qat_uclo_init_ae_data(obj_handle, ae, i))
684 pr_err("QAT: uimage uses AE not set");
/* Locate the string-table chunk @tab_name inside @obj_hdr and fill in
 * @str_table: copy the length field, then point ->strings past the
 * in-buffer header (hdr_size accounts for the table_len prefix). */
690 static struct icp_qat_uof_strtable *
691 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
692 char *tab_name, struct icp_qat_uof_strtable *str_table)
694 struct icp_qat_uof_chunkhdr *chunk_hdr;
696 chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
697 obj_hdr->file_buff, tab_name, NULL);
/* memcpy because the chunk data may be unaligned in the file buffer. */
701 memcpy(&str_table->table_len, obj_hdr->file_buff +
702 chunk_hdr->offset, sizeof(str_table->table_len));
703 hdr_size = (char *)&str_table->strings - (char *)str_table;
704 str_table->strings = (unsigned long)obj_hdr->file_buff +
705 chunk_hdr->offset + hdr_size;
/* Locate the IMEM chunk and fill @init_mem_tab: the chunk starts with
 * the entry count (copied with memmove for alignment safety) followed
 * immediately by the initmem record array. */
712 qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
713 struct icp_qat_uclo_init_mem_table *init_mem_tab)
715 struct icp_qat_uof_chunkhdr *chunk_hdr;
717 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
718 ICP_QAT_UOF_IMEM, NULL);
720 memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
721 chunk_hdr->offset, sizeof(unsigned int));
722 init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
723 (encap_uof_obj->beg_uof + chunk_hdr->offset +
724 sizeof(unsigned int));
/* Check the loaded UOF against the platform: its cpu_type mask must
 * include our product type and our major revision must fall inside the
 * UOF's supported [min_cpu_ver, max_cpu_ver] range. */
728 static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
730 unsigned int maj_ver, prod_type = obj_handle->prod_type;
732 if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) {
733 pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n",
734 obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type);
737 maj_ver = obj_handle->prod_rev & 0xff;
738 if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
739 (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
740 pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
/* Initialize one register of type @reg_type on AE @ae (for the contexts
 * in @ctx_mask) by dispatching to the matching HAL helper: GPR, read
 * transfer, write transfer, or next-neighbour. Unsupported types are
 * rejected with an error. (The switch cases are truncated here.) */
746 static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
747 unsigned char ae, unsigned char ctx_mask,
748 enum icp_qat_uof_regtype reg_type,
749 unsigned short reg_addr, unsigned int value)
757 return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
768 return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
775 return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
778 return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
780 pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
/* Apply an image's init-regsym records to AE @ae. The context mask is
 * derived from the image's context mode (derivation lines truncated).
 * INIT_REG applies to all contexts in the mask; INIT_REG_CTX targets a
 * single context after validating it against the mask; the two INIT_EXPR
 * variants are unsupported and rejected. */
786 static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
788 struct icp_qat_uclo_encapme *encap_ae)
791 unsigned char ctx_mask;
792 struct icp_qat_uof_init_regsym *init_regsym;
794 if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
795 ICP_QAT_UCLO_MAX_CTX)
800 for (i = 0; i < encap_ae->init_regsym_num; i++) {
801 unsigned int exp_res;
803 init_regsym = &encap_ae->init_regsym[i];
804 exp_res = init_regsym->value;
805 switch (init_regsym->init_type) {
806 case ICP_QAT_UOF_INIT_REG:
807 qat_uclo_init_reg(handle, ae, ctx_mask,
808 (enum icp_qat_uof_regtype)
809 init_regsym->reg_type,
810 (unsigned short)init_regsym->reg_addr,
813 case ICP_QAT_UOF_INIT_REG_CTX:
814 /* check if ctx is appropriate for the ctxMode */
815 if (!((1 << init_regsym->ctx) & ctx_mask)) {
816 pr_err("QAT: invalid ctx num = 0x%x\n",
820 qat_uclo_init_reg(handle, ae,
822 (1 << init_regsym->ctx),
823 (enum icp_qat_uof_regtype)
824 init_regsym->reg_type,
825 (unsigned short)init_regsym->reg_addr,
828 case ICP_QAT_UOF_INIT_EXPR:
829 pr_err("QAT: INIT_EXPR feature not supported\n");
831 case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
832 pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
/* One-time global initialization: run the initmem table (if any), then
 * apply every slice image's register symbols per AE; finally mark the
 * object as globally initialized so repeat calls short-circuit. */
841 static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
843 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
/* Already done — idempotent early exit. */
846 if (obj_handle->global_inited)
848 if (obj_handle->init_mem_tab.entry_num) {
849 if (qat_uclo_init_memory(handle)) {
850 pr_err("QAT: initialize memory failed\n");
854 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
855 for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
856 if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
858 if (qat_uclo_init_reg_sym(handle, ae,
859 obj_handle->ae_data[ae].
860 ae_slices[s].encap_image))
864 obj_handle->global_inited = 1;
868 static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
870 unsigned char ae, nn_mode, s;
871 struct icp_qat_uof_image *uof_image;
872 struct icp_qat_uclo_aedata *ae_data;
873 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
875 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
877 (unsigned long *)&handle->hal_handle->ae_mask))
879 ae_data = &obj_handle->ae_data[ae];
880 for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
881 ICP_QAT_UCLO_MAX_CTX); s++) {
882 if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
884 uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
885 if (qat_hal_set_ae_ctx_mode(handle, ae,
886 (char)ICP_QAT_CTX_MODE
887 (uof_image->ae_mode))) {
888 pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
891 nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
892 if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
893 pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
896 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
897 (char)ICP_QAT_LOC_MEM0_MODE
898 (uof_image->ae_mode))) {
899 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
902 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
903 (char)ICP_QAT_LOC_MEM1_MODE
904 (uof_image->ae_mode))) {
905 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
/* Seed each mapped image's uwords_num with the end of its code page
 * (physical start address + word count); may be raised later by
 * qat_uclo_init_umem_seg. */
913 static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
915 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
916 struct icp_qat_uclo_encapme *image;
919 for (a = 0; a < obj_handle->uimage_num; a++) {
920 image = &obj_handle->ae_uimage[a];
921 image->uwords_num = image->page->beg_addr_p +
922 image->page->micro_words_num;
/* Top-level UOF parse: allocate the uword bounce buffer, set up the
 * encapsulated-object view, derive prod_rev from the HAL revision id,
 * check compatibility, map the string table, map all images, bind AEs,
 * compute uword counts, map the initmem table, and set AE modes. The
 * error labels unwind image pages and the uword buffer. */
926 static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
928 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
931 obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
933 if (!obj_handle->uword_buf)
935 obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
936 obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
937 obj_handle->obj_hdr->file_buff;
/* Microwords are 6 bytes wide in the UOF encoding. */
938 obj_handle->uword_in_bytes = 6;
939 obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
940 obj_handle->prod_rev = PID_MAJOR_REV |
941 (PID_MINOR_REV & handle->hal_handle->revision_id);
942 if (qat_uclo_check_uof_compat(obj_handle)) {
943 pr_err("QAT: UOF incompatible\n");
946 obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
947 if (!obj_handle->obj_hdr->file_buff ||
948 !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
949 &obj_handle->str_table)) {
950 pr_err("QAT: UOF doesn't have effective images\n");
953 obj_handle->uimage_num =
954 qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
955 ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
956 if (!obj_handle->uimage_num)
958 if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
959 pr_err("QAT: Bad object\n");
960 goto out_check_uof_aemask_err;
962 qat_uclo_init_uword_num(handle);
963 qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
964 &obj_handle->init_mem_tab);
965 if (qat_uclo_set_ae_mode(handle))
966 goto out_check_uof_aemask_err;
/* Unwind: free per-image pages, then the uword buffer. */
968 out_check_uof_aemask_err:
969 for (ae = 0; ae < obj_handle->uimage_num; ae++)
970 kfree(obj_handle->ae_uimage[ae].page);
972 kfree(obj_handle->uword_buf);
/* Write an MMP/maintenance image buffer into device SRAM at offset 0,
 * rounding the byte count up to a 4-byte multiple. */
976 void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
977 void *addr_ptr, int mem_size)
979 qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4));
/* Public entry: duplicate the caller's UOF buffer, validate its file
 * header, map the object chunk, attach the object handle and parse it.
 * Error labels unwind in reverse (obj_hdr, obj_buf; objhdl free lines
 * are truncated). mem_size < 24 guards the minimum header size. */
982 int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
983 void *addr_ptr, int mem_size)
985 struct icp_qat_uof_filehdr *filehdr;
986 struct icp_qat_uclo_objhandle *objhdl;
/* ae_mask bit operations require MAX_AE to fit in the mask word. */
988 BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
989 (sizeof(handle->hal_handle->ae_mask) * 8));
991 if (!handle || !addr_ptr || mem_size < 24)
993 objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
/* Private copy so the caller's buffer lifetime doesn't matter. */
996 objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
997 if (!objhdl->obj_buf)
999 filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
1000 if (qat_uclo_check_format(filehdr))
1001 goto out_objhdr_err;
1002 objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
1004 if (!objhdl->obj_hdr) {
1005 pr_err("QAT: object file chunk is null\n");
1006 goto out_objhdr_err;
1008 handle->obj_handle = objhdl;
1009 if (qat_uclo_parse_uof_obj(handle))
1010 goto out_overlay_obj_err;
1013 out_overlay_obj_err:
1014 handle->obj_handle = NULL;
1015 kfree(objhdl->obj_hdr);
1017 kfree(objhdl->obj_buf);
/* Tear down everything qat_uclo_map_uof_obj built: uword buffer,
 * per-image pages, per-AE slice data, the chunk wrapper, the duplicated
 * UOF buffer, and finally detach the object handle. */
1023 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
1025 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1031 kfree(obj_handle->uword_buf);
1032 for (a = 0; a < obj_handle->uimage_num; a++)
1033 kfree(obj_handle->ae_uimage[a].page);
1035 for (a = 0; a < handle->hal_handle->ae_max_num; a++)
1036 qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
1038 kfree(obj_handle->obj_hdr);
1039 kfree(obj_handle->obj_buf);
1041 handle->obj_handle = NULL;
/* Resolve one microword for relative address @raddr: if it falls inside
 * an encap-page uword block, fetch the 6-byte word from that block
 * (masked to drop unused high bits); otherwise the @fill pattern is
 * used, and INVLD_UWORD slots are handled specially (assignment lines
 * are truncated in this extract). */
1044 static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
1045 struct icp_qat_uclo_encap_page *encap_page,
1046 uint64_t *uword, unsigned int addr_p,
1047 unsigned int raddr, uint64_t fill)
1056 for (i = 0; i < encap_page->uwblock_num; i++) {
1057 if (raddr >= encap_page->uwblock[i].start_addr &&
1058 raddr <= encap_page->uwblock[i].start_addr +
1059 encap_page->uwblock[i].words_num - 1) {
/* Convert to a byte offset within this block. */
1060 raddr -= encap_page->uwblock[i].start_addr;
1061 raddr *= obj_handle->uword_in_bytes;
1062 memcpy(&uwrd, (void *)(((unsigned long)
1063 encap_page->uwblock[i].micro_words) + raddr),
1064 obj_handle->uword_in_bytes);
1065 uwrd = uwrd & 0xbffffffffffull;
1069 if (*uword == INVLD_UWORD)
/* Stream an encapsulated code page into AE @ae's ustore in chunks of at
 * most UWORD_CPYBUF_SIZE words: fill the bounce buffer word by word via
 * qat_uclo_fill_uwords (using image fill pattern for gaps), then write
 * it with qat_hal_wr_uwords, advancing both address counters. */
1073 static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
1074 struct icp_qat_uclo_encap_page
1075 *encap_page, unsigned int ae)
1077 unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
1078 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1081 /* load the page starting at appropriate ustore address */
1082 /* get fill-pattern from an image -- they are all the same */
1083 memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
1085 uw_physical_addr = encap_page->beg_addr_p;
1086 uw_relative_addr = 0;
1087 words_num = encap_page->micro_words_num;
/* Last partial chunk vs. full bounce-buffer chunk. */
1089 if (words_num < UWORD_CPYBUF_SIZE)
1092 cpylen = UWORD_CPYBUF_SIZE;
1094 /* load the buffer */
1095 for (i = 0; i < cpylen; i++)
1096 qat_uclo_fill_uwords(obj_handle, encap_page,
1097 &obj_handle->uword_buf[i],
1098 uw_physical_addr + i,
1099 uw_relative_addr + i, fill_pat);
1101 /* copy the buffer to ustore */
1102 qat_hal_wr_uwords(handle, (unsigned char)ae,
1103 uw_physical_addr, cpylen,
1104 obj_handle->uword_buf);
1106 uw_physical_addr += cpylen;
1107 uw_relative_addr += cpylen;
1108 words_num -= cpylen;
/* Load @image into every AE it is assigned to: locate the slice bound
 * to the image's context mask, write its default page to ustore, record
 * the current page per context, then activate the contexts and set
 * their program counters to the image's entry address. */
1112 static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
1113 struct icp_qat_uof_image *image)
1115 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1116 unsigned int ctx_mask, s;
1117 struct icp_qat_uclo_page *page;
/* ctx_mask derivation from the image's context mode (lines truncated
 * here; 8-context vs 4-context mode). */
1121 if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
1125 /* load the default page and set assigned CTX PC
1126 * to the entrypoint address */
1127 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
1128 if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
1130 /* find the slice to which this image is assigned */
1131 for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
1132 if (image->ctx_assigned & obj_handle->ae_data[ae].
1133 ae_slices[s].ctx_mask_assigned)
1136 if (s >= obj_handle->ae_data[ae].slice_num)
1138 page = obj_handle->ae_data[ae].ae_slices[s].page;
1139 if (!page->encap_page->def_page)
1141 qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
1143 page = obj_handle->ae_data[ae].ae_slices[s].page;
/* Track which page each assigned context is now running. */
1144 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
1145 obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
1146 (ctx_mask & (1 << ctx)) ? page : NULL;
1147 qat_hal_set_live_ctx(handle, (unsigned char)ae,
1148 image->ctx_assigned);
1149 qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
1150 image->entry_address);
1154 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
1156 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1159 if (qat_uclo_init_globals(handle))
1161 for (i = 0; i < obj_handle->uimage_num; i++) {
1162 if (!obj_handle->ae_uimage[i].img_ptr)
1164 if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
1166 qat_uclo_wr_uimage_page(handle,
1167 obj_handle->ae_uimage[i].img_ptr);