1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/module.h>
4 #include <linux/slab.h>
5 #include "adf_accel_devices.h"
6 #include "adf_common_drv.h"
7 #include "adf_transport.h"
8 #include "adf_transport_access_macros.h"
10 #include "adf_cfg_strings.h"
11 #include "qat_crypto.h"
12 #include "icp_qat_fw.h"
/* Shorthand for the kernel-space configuration section name. */
#define SEC ADF_KERNEL_SEC

/*
 * Service handle registered with the ADF framework; initialized in
 * qat_crypto_register() with qat_crypto_event_handler() as its event
 * callback.
 */
static struct service_hndl qat_crypto;
/**
 * qat_crypto_put_instance() - Release a reference to a crypto instance.
 * @inst: crypto instance to put back.
 *
 * Decrements the instance reference counter and drops the reference held
 * on the owning acceleration device (balances the adf_dev_get() and
 * atomic_inc() performed in qat_crypto_get_instance_node()).
 *
 * NOTE(review): function braces are elided in this view of the file.
 */
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
	atomic_dec(&inst->refctr);
	adf_dev_put(inst->accel_dev);
/*
 * Free every crypto instance attached to an acceleration device.
 *
 * For each instance: drop all outstanding references via
 * qat_crypto_put_instance(), remove the four transport rings
 * (sym tx/rx, pke tx/rx) and unlink the instance from the device's
 * crypto_list.
 *
 * NOTE(review): lines are elided in this view (declaration of the loop
 * counter `i`, NULL checks around adf_remove_ring(), kfree of the
 * instance, the return statement) — confirm against the full file.
 */
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
	struct qat_crypto_instance *inst, *tmp;

	list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
		/* Balance every reference still held on this instance. */
		for (i = 0; i < atomic_read(&inst->refctr); i++)
			qat_crypto_put_instance(inst);
		/* Tear down the instance's transport rings. */
		adf_remove_ring(inst->sym_tx);
		adf_remove_ring(inst->sym_rx);
		adf_remove_ring(inst->pke_tx);
		adf_remove_ring(inst->pke_rx);
		list_del(&inst->list);
/**
 * qat_crypto_get_instance_node() - Get a crypto instance near a NUMA node.
 * @node: NUMA node the caller would like the instance close to.
 *
 * Scans all started devices for one on the requested node (or with no node
 * affinity, dev_to_node() < 0) that has crypto instances, preferring the
 * device with the lowest ref_count; falls back to any started device with
 * a non-empty crypto_list.  On the chosen device the least-referenced
 * instance is selected, a device reference is taken with adf_dev_get()
 * and the instance refctr is incremented.  Callers release with
 * qat_crypto_put_instance().
 *
 * NOTE(review): several lines are elided in this view (per-loop `ctr`
 * declarations, the best-count comparison/assignment bookkeeping, the
 * NULL-device fallthrough checks and the return statements) — confirm
 * against the full file.
 */
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
	struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
	struct qat_crypto_instance *inst = NULL, *tmp_inst;
	unsigned long best = ~0;

	/* Pass 1: prefer a started, crypto-capable device on the requested
	 * node (or one without node affinity), tracking the least-used one.
	 */
	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
		    adf_dev_started(tmp_dev) &&
		    !list_empty(&tmp_dev->crypto_list)) {
			ctr = atomic_read(&tmp_dev->ref_count);
	/* Pass 2: no node-local device found — take any started device. */
	pr_info("QAT: Could not find a device on node %d\n", node);
	/* Get any started device */
	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
		if (adf_dev_started(tmp_dev) &&
		    !list_empty(&tmp_dev->crypto_list)) {
	/* Pick the least-referenced instance on the chosen device. */
	list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
		ctr = atomic_read(&tmp_inst->refctr);
	/* Pin the device while the caller uses the instance. */
	if (adf_dev_get(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
	atomic_inc(&inst->refctr);
/**
 * qat_crypto_dev_config() - Create the device configuration needed for
 *                           crypto instances.
 * @accel_dev: Pointer to acceleration device.
 *
 * Populates the kernel configuration section with one entry set per crypto
 * instance (bank numbers, core affinity, ring sizes and ring numbers for
 * both symmetric and asymmetric traffic) plus the per-accelerator
 * interrupt-coalescing timer, then marks the device configured.
 *
 * Return: 0 on success, error code otherwise.
 */
int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int banks = GET_MAX_BANKS(accel_dev);
	int cpus = num_online_cpus();
	/* NOTE(review): declarations of `val`, `instances`, `i`, `ret` and
	 * all error-handling/goto paths are elided in this view — confirm
	 * against the full file.
	 */

	/* One instance per online CPU, capped by the number of banks. */
	if (adf_hw_dev_has_crypto(accel_dev))
		instances = min(cpus, banks);

	ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
	ret = adf_cfg_section_add(accel_dev, "Accelerator0");

	/* Temporarily set the number of crypto instances to zero to avoid
	 * registering the crypto algorithms.
	 * This will be removed when the algorithms will support the
	 * CRYPTO_TFM_REQ_MAY_BACKLOG flag
	 */
	for (i = 0; i < instances; i++) {
		/* Bank assignment for the asymmetric (PKE) rings. */
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
		/* Bank assignment for the symmetric rings. */
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
		/* IRQ/core affinity for this instance's bank. */
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
		/* Ring sizes for asymmetric and symmetric requests. */
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
		/* Ring numbers: tx then rx, for asym and sym traffic. */
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
		/* Interrupt-coalescing timer, stored per accelerator. */
		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
	/* Publish the instance count (zero for now, see comment above). */
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
	/* Error path (elided labels in this view). */
	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
EXPORT_SYMBOL_GPL(qat_crypto_dev_config);
/*
 * Create the crypto instances described by the device configuration.
 *
 * Reads ADF_NUM_CY from the kernel section, then for each instance reads
 * its sym/asym bank numbers and ring sizes and creates the four transport
 * rings: sym tx, pke (asym) tx, sym rx (completion callback
 * qat_alg_callback) and asym rx (qat_alg_asym_callback).  Each instance is
 * linked onto accel_dev->crypto_list; on failure all instances built so
 * far are freed via qat_crypto_free_instances().
 *
 * NOTE(review): declarations of `i`, `ret`, `msg_size` and the
 * error-checking/goto-err lines after each call are elided in this
 * view — confirm against the full file.
 */
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
	unsigned long num_inst, num_msg_sym, num_msg_asym;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	unsigned long sym_bank, asym_bank;
	struct qat_crypto_instance *inst;

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	/* Number of crypto instances configured for this device. */
	ret = adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val);
	ret = kstrtoul(val, 0, &num_inst);
	for (i = 0; i < num_inst; i++) {
		/* Allocate on the device's NUMA node for locality. */
		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
				    dev_to_node(&GET_DEV(accel_dev)));
		list_add_tail(&inst->list, &accel_dev->crypto_list);
		atomic_set(&inst->refctr, 0);
		inst->accel_dev = accel_dev;
		/* Bank hosting the symmetric rings. */
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		ret = kstrtoul(val, 10, &sym_bank);
		/* Bank hosting the asymmetric (PKE) rings. */
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		ret = kstrtoul(val, 10, &asym_bank);
		/* Configured ring sizes; halved below (presumably split
		 * between tx and rx — TODO confirm).
		 */
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		ret = kstrtoul(val, 10, &num_msg_sym);
		num_msg_sym = num_msg_sym >> 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		ret = kstrtoul(val, 10, &num_msg_asym);
		num_msg_asym = num_msg_asym >> 1;
		/* Request (tx) rings: sym uses the full default message
		 * size, asym uses half of it.
		 */
		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
				      msg_size, key, NULL, 0, &inst->sym_tx);
		msg_size = msg_size >> 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
				      msg_size, key, NULL, 0, &inst->pke_tx);
		/* Response (rx) rings with completion callbacks. */
		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
				      msg_size, key, qat_alg_callback, 0,
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
				      msg_size, key, qat_alg_asym_callback, 0,
	/* Error path: undo everything created so far. */
	qat_crypto_free_instances(accel_dev);
/*
 * ADF init-event handler: build the crypto instances from the device
 * configuration.
 *
 * NOTE(review): braces and return statements are elided in this view.
 */
static int qat_crypto_init(struct adf_accel_dev *accel_dev)
	if (qat_crypto_create_instances(accel_dev))
/*
 * ADF shutdown-event handler: tear down all crypto instances on the
 * device.  (Braces elided in this view.)
 */
static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
	return qat_crypto_free_instances(accel_dev);
/*
 * Dispatch ADF lifecycle events to the crypto service: init creates the
 * instances, shutdown frees them; restarting/restarted/start fall through
 * with no work.
 *
 * NOTE(review): the switch statement head, the ADF_EVENT_INIT case label,
 * break statements, the default case and the return are elided in this
 * view — confirm against the full file.
 */
static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
				    enum adf_event event)
		ret = qat_crypto_init(accel_dev);
	case ADF_EVENT_SHUTDOWN:
		ret = qat_crypto_shutdown(accel_dev);
	/* No action required for the remaining lifecycle events. */
	case ADF_EVENT_RESTARTING:
	case ADF_EVENT_RESTARTED:
	case ADF_EVENT_START:
/**
 * qat_crypto_register() - Register the crypto service with the ADF
 *                         framework.
 *
 * Zeroes the service handle, installs qat_crypto_event_handler() as the
 * event callback and registers under the name "qat_crypto".
 *
 * Return: result of adf_service_register().
 * (Braces elided in this view.)
 */
int qat_crypto_register(void)
	memset(&qat_crypto, 0, sizeof(qat_crypto));
	qat_crypto.event_hld = qat_crypto_event_handler;
	qat_crypto.name = "qat_crypto";
	return adf_service_register(&qat_crypto);
/*
 * Unregister the crypto service handle from the ADF framework.
 * (Braces elided in this view.)
 */
int qat_crypto_unregister(void)
	return adf_service_unregister(&qat_crypto);