/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "ssi_config.h"
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <crypto/ctr.h>
#ifdef FLUSH_CACHE_ALL
#include <asm/cacheflush.h>
#endif
#include <linux/pm_runtime.h>
#include "ssi_driver.h"
#include "ssi_buffer_mgr.h"
#include "ssi_request_mgr.h"
#include "ssi_sysfs.h"
#include "ssi_ivgen.h"
#include "ssi_pm.h"

#define SSI_MAX_POLL_ITER	10
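
/*
 * Request manager state: a SW request ring that shadows the HW descriptor
 * queue, plus the shared completion machinery. The head/tail indices wrap
 * with power-of-two masking, so MAX_REQUEST_QUEUE_SIZE must be a power of
 * two.
 */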
struct ssi_request_mgr_handle {
	/* Request manager resources */
	unsigned int hw_queue_size; /* HW capability */
	unsigned int min_free_hw_slots;
	unsigned int max_used_sw_slots;
	struct ssi_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
	u32 req_queue_head;
	u32 req_queue_tail;
	u32 axi_completed;
	u32 q_free_slots;
	spinlock_t hw_lock;
	struct cc_hw_desc compl_desc;
	u8 *dummy_comp_buff;
	dma_addr_t dummy_comp_buff_dma;
	struct cc_hw_desc monitor_desc;

	volatile unsigned long monitor_lock;
#ifdef COMP_IN_WQ
	struct workqueue_struct *workq;
	struct delayed_work compwork;
#else
	struct tasklet_struct comptask;
#endif
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
	bool is_runtime_suspended;
#endif
};

static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif

void request_mgr_fini(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;

	if (!req_mgr_h)
		return; /* Not allocated */

	if (req_mgr_h->dummy_comp_buff_dma != 0) {
		dma_free_coherent(&drvdata->plat_dev->dev,
				  sizeof(u32), req_mgr_h->dummy_comp_buff,
				  req_mgr_h->dummy_comp_buff_dma);
	}

	SSI_LOG_DEBUG("max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
						 req_mgr_h->min_free_hw_slots));
	SSI_LOG_DEBUG("max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);

#ifdef COMP_IN_WQ
	flush_workqueue(req_mgr_h->workq);
	destroy_workqueue(req_mgr_h->workq);
#else
	/* Kill tasklet */
	tasklet_kill(&req_mgr_h->comptask);
#endif
	memset(req_mgr_h, 0, sizeof(struct ssi_request_mgr_handle));
	kfree(req_mgr_h);
	drvdata->request_mgr_handle = NULL;
}

int request_mgr_init(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *req_mgr_h;
	int rc = 0;

	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
	if (!req_mgr_h) {
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	drvdata->request_mgr_handle = req_mgr_h;

	spin_lock_init(&req_mgr_h->hw_lock);
#ifdef COMP_IN_WQ
	SSI_LOG_DEBUG("Initializing completion workqueue\n");
	req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
	if (unlikely(!req_mgr_h->workq)) {
		SSI_LOG_ERR("Failed creating work queue\n");
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
	SSI_LOG_DEBUG("Initializing completion tasklet\n");
	tasklet_init(&req_mgr_h->comptask, comp_handler, (unsigned long)drvdata);
#endif
	req_mgr_h->hw_queue_size = READ_REGISTER(drvdata->cc_base +
		CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_SRAM_SIZE));
	SSI_LOG_DEBUG("hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
		SSI_LOG_ERR("Invalid HW queue size = %u (Min. required is %u)\n",
			    req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
	req_mgr_h->max_used_sw_slots = 0;

	/* Allocate DMA word for "dummy" completion descriptor use */
	req_mgr_h->dummy_comp_buff =
		dma_alloc_coherent(&drvdata->plat_dev->dev, sizeof(u32),
				   &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL);
	if (!req_mgr_h->dummy_comp_buff) {
		SSI_LOG_ERR("Not enough memory to allocate DMA (%zu) dropped buffer\n",
			    sizeof(u32));
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	/* Init. "dummy" completion descriptor */
	hw_desc_init(&req_mgr_h->compl_desc);
	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
		      sizeof(u32), NS_BIT, 1);
	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
	set_queue_last_ind(&req_mgr_h->compl_desc);

	return 0;

req_mgr_init_err:
	request_mgr_fini(drvdata);
	return rc;
}
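
/*
 * Push a descriptor sequence into the HW queue: each 32-bit descriptor
 * word is written to the same DSCRPTR_QUEUE_WORD0 register window. The
 * wmb() keeps words 0-4 posted ahead of the final word-5 write that
 * completes the descriptor.
 */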
static inline void enqueue_seq(
	void __iomem *cc_base,
	struct cc_hw_desc seq[], unsigned int seq_len)
{
	unsigned int i;

	for (i = 0; i < seq_len; i++) {
		writel_relaxed(seq[i].word[0], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[1], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[2], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[3], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[4], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		wmb();
		writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
#ifdef DX_DUMP_DESCS
		SSI_LOG_DEBUG("desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
			      seq[i].word[0], seq[i].word[1], seq[i].word[2],
			      seq[i].word[3], seq[i].word[4], seq[i].word[5]);
#endif
	}
}

/*!
 * Completion will take place if and only if user requested completion
 * by setting "is_dout = 0" in send_request().
 *
 * \param dev
 * \param dx_compl_h The completion event to signal
 */
static void request_mgr_complete(struct device *dev, void *dx_compl_h, void __iomem *cc_base)
{
	struct completion *this_compl = dx_compl_h;

	complete(this_compl);
}
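
/*
 * Check for room in both queues. The SW ring indices wrap with
 * power-of-two masking; e.g. with a hypothetical MAX_REQUEST_QUEUE_SIZE
 * of 8, head == 7 and tail == 0 means full: (7 + 1) & 7 == 0 == tail.
 * One slot is sacrificed to tell "full" from "empty". Returns 0 when
 * there is room, -EBUSY when the SW ring is full and -EAGAIN when the
 * HW queue did not drain within SSI_MAX_POLL_ITER polls.
 */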
static inline int request_mgr_queues_status_check(
	struct ssi_request_mgr_handle *req_mgr_h,
	void __iomem *cc_base,
	unsigned int total_seq_len)
{
	unsigned long poll_queue;

	/* SW queue is checked only once as it will not
	 * be changed during the poll because the spinlock_bh
	 * is held by the thread
	 */
	if (unlikely(((req_mgr_h->req_queue_head + 1) &
		      (MAX_REQUEST_QUEUE_SIZE - 1)) ==
		     req_mgr_h->req_queue_tail)) {
		SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
			    req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
		return -EBUSY;
	}

	if (likely(req_mgr_h->q_free_slots >= total_seq_len))
		return 0;

	/* Wait for space in HW queue. Poll constant num of iterations. */
	for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER; poll_queue++) {
		req_mgr_h->q_free_slots =
			CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL,
							   DSCRPTR_QUEUE_CONTENT));
		if (unlikely(req_mgr_h->q_free_slots <
			     req_mgr_h->min_free_hw_slots)) {
			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
		}

		if (likely(req_mgr_h->q_free_slots >= total_seq_len)) {
			/* If there is enough place return */
			return 0;
		}

		SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
			      req_mgr_h->q_free_slots, total_seq_len);
	}
	/* No room in the HW queue, try again later */
	SSI_LOG_DEBUG("HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
		      req_mgr_h->req_queue_head,
		      MAX_REQUEST_QUEUE_SIZE,
		      req_mgr_h->q_free_slots,
		      total_seq_len);
	return -EAGAIN;
}

/*!
 * Enqueue caller request to crypto hardware.
 *
 * \param drvdata
 * \param ssi_req The request to enqueue
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 * \param is_dout If "true": completion is handled by the caller
 *	  If "false": this function adds a dummy descriptor completion
 *	  and waits upon completion signal.
 *
 * \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
 */
int send_request(
	struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
	struct cc_hw_desc *desc, unsigned int len, bool is_dout)
{
	void __iomem *cc_base = drvdata->cc_base;
	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int used_sw_slots;
	unsigned int iv_seq_len = 0;
	unsigned int total_seq_len = len; /* initial sequence length */
	struct cc_hw_desc iv_seq[SSI_IVPOOL_SEQ_LEN];
	int rc;
	unsigned int max_required_seq_len = (total_seq_len +
					((ssi_req->ivgen_dma_addr_len == 0) ? 0 :
					SSI_IVPOOL_SEQ_LEN) +
					((is_dout == 0) ? 1 : 0));

#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
	rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev);
	if (rc != 0) {
		SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n", rc);
		return rc;
	}
#endif

	do {
		spin_lock_bh(&req_mgr_h->hw_lock);

		/* Check if there is enough place in the SW/HW queues
		 * in case iv gen add the max size and in case of no dout add 1
		 * for the internal completion descriptor
		 */
		rc = request_mgr_queues_status_check(req_mgr_h, cc_base,
						     max_required_seq_len);
		if (likely(rc == 0))
			/* There is enough place in the queue */
			break;
		/* Something went wrong, release the spinlock */
		spin_unlock_bh(&req_mgr_h->hw_lock);

		if (rc != -EAGAIN) {
			/* Any error other than HW queue full
			 * (SW queue is full)
			 */
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
			ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
#endif
			return rc;
		}

		/* HW queue is full - short sleep */
		msleep(1);
	} while (1);

	/* An additional completion descriptor is needed in case the caller
	 * did not enable any DLLI/MLLI DOUT bit in the given sequence
	 */
	if (!is_dout) {
		init_completion(&ssi_req->seq_compl);
		ssi_req->user_cb = request_mgr_complete;
		ssi_req->user_arg = &ssi_req->seq_compl;
		total_seq_len++;
	}

	if (ssi_req->ivgen_dma_addr_len > 0) {
		SSI_LOG_DEBUG("Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
			      ssi_req->ivgen_dma_addr_len,
			      ssi_req->ivgen_dma_addr[0],
			      ssi_req->ivgen_dma_addr[1],
			      ssi_req->ivgen_dma_addr[2],
			      ssi_req->ivgen_size);

		/* Acquire IV from pool */
		rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr,
				     ssi_req->ivgen_dma_addr_len,
				     ssi_req->ivgen_size, iv_seq, &iv_seq_len);
		if (unlikely(rc != 0)) {
			SSI_LOG_ERR("Failed to generate IV (rc=%d)\n", rc);
			spin_unlock_bh(&req_mgr_h->hw_lock);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
			ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
#endif
			return rc;
		}

		total_seq_len += iv_seq_len;
	}

	used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & (MAX_REQUEST_QUEUE_SIZE - 1));
	if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots))
		req_mgr_h->max_used_sw_slots = used_sw_slots;

	/* Enqueue request - must be locked with HW lock */
	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *ssi_req;
	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
	/* TODO: Use circ_buf.h ? */

	SSI_LOG_DEBUG("Enqueue request head=%u\n", req_mgr_h->req_queue_head);

#ifdef FLUSH_CACHE_ALL
	flush_cache_all();
#endif

	/* STAT_PHASE_4: Push sequence */
	enqueue_seq(cc_base, iv_seq, iv_seq_len);
	enqueue_seq(cc_base, desc, len);
	enqueue_seq(cc_base, &req_mgr_h->compl_desc, is_dout ? 0 : 1);

	if (unlikely(req_mgr_h->q_free_slots < total_seq_len)) {
		/* This means that there was a problem with the resume */
		BUG();
	} else {
		/* Update the free slots in HW queue */
		req_mgr_h->q_free_slots -= total_seq_len;
	}

	spin_unlock_bh(&req_mgr_h->hw_lock);

	if (!is_dout) {
		/* Wait upon sequence completion.
		 * Return "0" - Operation done successfully.
		 */
		wait_for_completion(&ssi_req->seq_compl);
		return 0;
	}
	/* Operation still in process */
	return -EINPROGRESS;
}
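
/*
 * Typical asynchronous caller flow (an illustrative sketch only;
 * my_complete_cb/my_build_seq are hypothetical, not driver API):
 *
 *	struct ssi_crypto_req ssi_req = {};
 *	struct cc_hw_desc desc[4];
 *	unsigned int len = 0;
 *	int rc;
 *
 *	ssi_req.user_cb = my_complete_cb;	// called from proc_completions()
 *	ssi_req.user_arg = my_ctx;
 *	my_build_seq(desc, &len);		// fill the HW descriptor sequence
 *	rc = send_request(drvdata, &ssi_req, desc, len, true);
 *	// rc == -EINPROGRESS: completion is delivered through my_complete_cb
 */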

/*!
 * Enqueue caller request to crypto hardware during init process.
 * Assume this function is not called in the middle of a flow,
 * since we set QUEUE_LAST_IND flag in the last descriptor.
 *
 * \param drvdata
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 *
 * \return int Returns "0" upon success
 */
int send_request_init(
	struct ssi_drvdata *drvdata, struct cc_hw_desc *desc, unsigned int len)
{
	void __iomem *cc_base = drvdata->cc_base;
	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int total_seq_len = len; /* initial sequence length */
	int rc = 0;

	/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT. */
	rc = request_mgr_queues_status_check(req_mgr_h, cc_base, total_seq_len);
	if (unlikely(rc != 0))
		return rc;

	set_queue_last_ind(&desc[(len - 1)]);

	enqueue_seq(cc_base, desc, len);

	/* Update the free slots in HW queue */
	req_mgr_h->q_free_slots = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL,
							DSCRPTR_QUEUE_CONTENT));

	return 0;
}
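
/*
 * send_request_init() is intended for one-shot sequences issued before
 * interrupts and the completion path are live (e.g. priming the IV pool);
 * a minimal sketch, assuming a single caller-built BYPASS descriptor:
 *
 *	struct cc_hw_desc desc;
 *	int rc;
 *
 *	hw_desc_init(&desc);
 *	set_din_const(&desc, 0, sizeof(u32));	// hypothetical payload
 *	set_flow_mode(&desc, BYPASS);
 *	rc = send_request_init(drvdata, &desc, 1);
 */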

void complete_request(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle =
		drvdata->request_mgr_handle;

#ifdef COMP_IN_WQ
	queue_delayed_work(request_mgr_handle->workq, &request_mgr_handle->compwork, 0);
#else
	tasklet_schedule(&request_mgr_handle->comptask);
#endif
}

#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work)
{
	struct ssi_drvdata *drvdata =
		container_of(work, struct ssi_drvdata, compwork.work);

	comp_handler((unsigned long)drvdata);
}
#endif

static void proc_completions(struct ssi_drvdata *drvdata)
{
	struct ssi_crypto_req *ssi_req;
	struct platform_device *plat_dev = drvdata->plat_dev;
	struct ssi_request_mgr_handle *request_mgr_handle =
		drvdata->request_mgr_handle;
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
	int rc = 0;
#endif

	while (request_mgr_handle->axi_completed) {
		request_mgr_handle->axi_completed--;

		/* Dequeue request */
		if (unlikely(request_mgr_handle->req_queue_head == request_mgr_handle->req_queue_tail)) {
			SSI_LOG_ERR("Request queue is empty req_queue_head==req_queue_tail==%u\n", request_mgr_handle->req_queue_head);
			break;
		}

		ssi_req = &request_mgr_handle->req_queue[request_mgr_handle->req_queue_tail];

#ifdef FLUSH_CACHE_ALL
		flush_cache_all();
#endif

#ifdef COMPLETION_DELAY
		/* Delay */
		{
			u32 axi_err;
			int i;

			SSI_LOG_INFO("Delay\n");
			for (i = 0; i < 1000000; i++)
				axi_err = READ_REGISTER(drvdata->cc_base + CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
		}
#endif /* COMPLETION_DELAY */

		if (likely(ssi_req->user_cb))
			ssi_req->user_cb(&plat_dev->dev, ssi_req->user_arg, drvdata->cc_base);
		request_mgr_handle->req_queue_tail = (request_mgr_handle->req_queue_tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
		SSI_LOG_DEBUG("Dequeue request tail=%u\n", request_mgr_handle->req_queue_tail);
		SSI_LOG_DEBUG("Request completed. axi_completed=%d\n", request_mgr_handle->axi_completed);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
		rc = ssi_power_mgr_runtime_put_suspend(&plat_dev->dev);
		if (rc != 0)
			SSI_LOG_ERR("Failed to set runtime suspension %d\n", rc);
#endif
	}
}

static inline u32 cc_axi_comp_count(void __iomem *cc_base)
{
	/* The CC_HAL_READ_REGISTER macro implicitly requires and uses
	 * a base MMIO register address variable named cc_base.
	 */
	return FIELD_GET(AXIM_MON_COMP_VALUE,
			 CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
}
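
/*
 * FIELD_GET() masks the register value and shifts it down by the field
 * offset; e.g. for a hypothetical AXIM_MON_COMP_VALUE of GENMASK(23, 16),
 * a raw readout of 0x00030000 decodes to a completion count of 3.
 */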

/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
	struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
	void __iomem *cc_base = drvdata->cc_base;
	struct ssi_request_mgr_handle *request_mgr_handle =
		drvdata->request_mgr_handle;

	u32 irq;

	irq = (drvdata->irq & SSI_COMP_IRQ_MASK);

	if (irq & SSI_COMP_IRQ_MASK) {
		/* To avoid the interrupt from firing as we unmask it, we clear it now */
		CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);

		/* Avoid race with above clear: Test completion counter once more */
		request_mgr_handle->axi_completed += cc_axi_comp_count(cc_base);

		while (request_mgr_handle->axi_completed) {
			do {
				proc_completions(drvdata);
				/* At this point (after proc_completions()),
				 * request_mgr_handle->axi_completed is 0.
				 */
				request_mgr_handle->axi_completed =
					cc_axi_comp_count(cc_base);
			} while (request_mgr_handle->axi_completed > 0);

			/* To avoid the interrupt from firing as we unmask it, we clear it now */
			CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);

			/* Avoid race with above clear: Test completion counter once more */
			request_mgr_handle->axi_completed += cc_axi_comp_count(cc_base);
		}
	}
	/* After verifying that there is nothing to do, unmask the AXI completion interrupt */
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
			      CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
}

/*
 * Resume the queue configuration - no need to take the lock as this happens
 * inside the spin lock protection.
 */
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle = drvdata->request_mgr_handle;

	spin_lock_bh(&request_mgr_handle->hw_lock);
	request_mgr_handle->is_runtime_suspended = false;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}

/*
 * Suspend the queue configuration. Since it is used for the runtime suspend
 * only, verify that the queue can be suspended.
 */
int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle =
		drvdata->request_mgr_handle;

	/* lock the send_request */
	spin_lock_bh(&request_mgr_handle->hw_lock);
	if (request_mgr_handle->req_queue_head !=
	    request_mgr_handle->req_queue_tail) {
		spin_unlock_bh(&request_mgr_handle->hw_lock);
		return -EBUSY;
	}
	request_mgr_handle->is_runtime_suspended = true;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}

bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle =
		drvdata->request_mgr_handle;

	return request_mgr_handle->is_runtime_suspended;
}
#endif
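
/*
 * A sketch of how the runtime-PM layer is expected to wire up the helpers
 * above (callback names are illustrative, not this driver's code):
 *
 *	static int cc_runtime_suspend(struct device *dev)
 *	{
 *		struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
 *		int rc = ssi_request_mgr_runtime_suspend_queue(drvdata);
 *
 *		if (rc)
 *			return rc;	// requests still queued, refuse suspend
 *		// ... gate the CC clock here ...
 *		return 0;
 *	}
 */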