// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 * Support for Nomadik hardware crypto engine.
 *
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson
 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson
 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson
 */

#define pr_fmt(fmt) "hashX hashX: " fmt

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>

#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>

#include <crypto/internal/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <linux/platform_data/crypto-ux500.h>

#include "hash_alg.h"

static int hash_mode;
module_param(hash_mode, int, 0);
MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
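
/*
 * Example (illustrative; assumes the module is built as ux500_hash, the
 * usual name in this driver's Makefile): select DMA mode at load time with
 *
 *	modprobe ux500_hash hash_mode=1
 */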

/* HMAC-SHA1, no key */
static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
	0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
	0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
	0x70, 0x69, 0x0e, 0x1d
};

/* HMAC-SHA256, no key */
static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
	0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
	0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
	0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
	0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
};
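
/*
 * Sanity check for the vectors above (a sketch, run on a host shell, not
 * in the kernel): HMAC with a zero-length key over the empty message
 * should reproduce them, e.g.
 *
 *	echo -n "" | openssl dgst -sha1 -hmac ""
 *	echo -n "" | openssl dgst -sha256 -hmac ""
 */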

/**
 * struct hash_driver_data - data specific to the driver.
 *
 * @device_list:	A list of registered devices to choose from.
 * @device_allocation:	A semaphore initialized with number of devices.
 */
struct hash_driver_data {
	struct klist		device_list;
	struct semaphore	device_allocation;
};

static struct hash_driver_data driver_data;

/* Declaration of functions */
/**
 * hash_messagepad - Pads a message and writes the nblw bits.
 * @device_data:	Structure for the hash device.
 * @message:		Last word of a message
 * @index_bytes:	The number of bytes in the last message
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes);

/**
 * release_hash_device - Releases a previously allocated hash device.
 * @device_data:	Structure for the hash device.
 */
static void release_hash_device(struct hash_device_data *device_data)
{
	spin_lock(&device_data->ctx_lock);
	device_data->current_ctx->device = NULL;
	device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	/*
	 * The down_interruptible part for this semaphore is called in
	 * cryp_get_device_data.
	 */
	up(&driver_data.device_allocation);
}

static void hash_dma_setup_channel(struct hash_device_data *device_data,
				   struct device *dev)
{
	struct hash_platform_data *platform_data = dev->platform_data;
	struct dma_slave_config conf = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = device_data->phybase + HASH_DMA_FIFO,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_maxburst = 16,
	};

	dma_cap_zero(device_data->dma.mask);
	dma_cap_set(DMA_SLAVE, device_data->dma.mask);

	device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
	device_data->dma.chan_mem2hash =
		dma_request_channel(device_data->dma.mask,
				    platform_data->dma_filter,
				    device_data->dma.cfg_mem2hash);

	dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);

	init_completion(&device_data->dma.complete);
}

static void hash_dma_callback(void *data)
{
	struct hash_ctx *ctx = data;

	complete(&ctx->device->dma.complete);
}

static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
				 int len, enum dma_data_direction direction)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *channel = NULL;

	if (direction != DMA_TO_DEVICE) {
		dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
			__func__);
		return -EFAULT;
	}

	sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);

	channel = ctx->device->dma.chan_mem2hash;
	ctx->device->dma.sg = sg;
	ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
					     ctx->device->dma.sg,
					     ctx->device->dma.nents,
					     direction);

	if (!ctx->device->dma.sg_len) {
		dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
			__func__);
		return -EFAULT;
	}

	dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
		__func__);
	desc = dmaengine_prep_slave_sg(channel,
				       ctx->device->dma.sg, ctx->device->dma.sg_len,
				       DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(ctx->device->dev,
			"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
		return -EFAULT;
	}

	desc->callback = hash_dma_callback;
	desc->callback_param = ctx;
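
	/*
	 * Note on the dmaengine API: dmaengine_submit() only places the
	 * descriptor on the channel's pending queue; the transfer does not
	 * start until dma_async_issue_pending() is called.
	 */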
	dmaengine_submit(desc);
	dma_async_issue_pending(channel);

	return 0;
}

static void hash_dma_done(struct hash_ctx *ctx)
{
	struct dma_chan *chan;

	chan = ctx->device->dma.chan_mem2hash;
	dmaengine_terminate_all(chan);
	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
		     ctx->device->dma.nents, DMA_TO_DEVICE);
}

static int hash_dma_write(struct hash_ctx *ctx,
			  struct scatterlist *sg, int len)
{
	int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);

	if (error) {
		dev_dbg(ctx->device->dev,
			"%s: hash_set_dma_transfer() failed\n", __func__);
		return error;
	}

	return len;
}

/**
 * get_empty_message_digest - Returns a pre-calculated digest for
 * the empty message.
 * @device_data:	Structure for the hash device.
 * @zero_hash:		Buffer to return the empty message digest.
 * @zero_hash_size:	Hash size of the empty message digest.
 * @zero_digest:	True if zero_digest returned.
 */
static int get_empty_message_digest(
		struct hash_device_data *device_data,
		u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
{
	int ret = 0;
	struct hash_ctx *ctx = device_data->current_ctx;
	*zero_digest = false;

	/*
	 * Caller responsible for ctx != NULL.
	 */

	if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
		if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
			memcpy(zero_hash, &sha1_zero_message_hash[0],
			       SHA1_DIGEST_SIZE);
			*zero_hash_size = SHA1_DIGEST_SIZE;
			*zero_digest = true;
		} else if (HASH_ALGO_SHA256 ==
			   ctx->config.algorithm) {
			memcpy(zero_hash, &sha256_zero_message_hash[0],
			       SHA256_DIGEST_SIZE);
			*zero_hash_size = SHA256_DIGEST_SIZE;
			*zero_digest = true;
		} else {
			dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
				__func__);
			ret = -EINVAL;
			goto out;
		}
	} else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
		if (!ctx->keylen) {
			if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
				memcpy(zero_hash, &zero_message_hmac_sha1[0],
				       SHA1_DIGEST_SIZE);
				*zero_hash_size = SHA1_DIGEST_SIZE;
				*zero_digest = true;
			} else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
				memcpy(zero_hash, &zero_message_hmac_sha256[0],
				       SHA256_DIGEST_SIZE);
				*zero_hash_size = SHA256_DIGEST_SIZE;
				*zero_digest = true;
			} else {
				dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
					__func__);
				ret = -EINVAL;
				goto out;
			}
		} else {
			dev_dbg(device_data->dev,
				"%s: Continue hash calculation, since hmac key available\n",
				__func__);
		}
	}
out:

	return ret;
}

/**
 * hash_disable_power - Request to disable power and clock.
 * @device_data:	Structure for the hash device.
 * @save_device_state:	If true, saves the current hw state.
 *
 * This function requests disabling power (regulator) and clock,
 * and can also save the current hw state.
 */
static int hash_disable_power(struct hash_device_data *device_data,
			      bool save_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state)
		goto out;

	if (save_device_state) {
		hash_save_state(device_data,
				&device_data->state);
		device_data->restore_dev_state = true;
	}

	clk_disable(device_data->clk);
	ret = regulator_disable(device_data->regulator);
	if (ret)
		dev_err(dev, "%s: regulator_disable() failed!\n", __func__);

	device_data->power_state = false;

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}

/**
 * hash_enable_power - Request to enable power and clock.
 * @device_data:		Structure for the hash device.
 * @restore_device_state:	If true, restores a previously saved hw state.
 *
 * This function requests enabling power (regulator) and clock,
 * and can also restore a previously saved hw state.
 */
static int hash_enable_power(struct hash_device_data *device_data,
			     bool restore_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state) {
		ret = regulator_enable(device_data->regulator);
		if (ret) {
			dev_err(dev, "%s: regulator_enable() failed!\n",
				__func__);
			goto out;
		}
		ret = clk_enable(device_data->clk);
		if (ret) {
			dev_err(dev, "%s: clk_enable() failed!\n", __func__);
			ret = regulator_disable(
					device_data->regulator);
			goto out;
		}
		device_data->power_state = true;
	}

	if (device_data->restore_dev_state) {
		if (restore_device_state) {
			device_data->restore_dev_state = false;
			hash_resume_state(device_data, &device_data->state);
		}
	}

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}

/**
 * hash_get_device_data - Checks for an available hash device and returns it.
 * @ctx:		Structure for the hash context.
 * @device_data:	Structure for the hash device.
 *
 * This function checks for an available hash device and returns it to
 * the caller.
 * Note! The caller needs to release the device, calling up().
 */
static int hash_get_device_data(struct hash_ctx *ctx,
				struct hash_device_data **device_data)
{
	int ret;
	struct klist_iter device_iterator;
	struct klist_node *device_node;
	struct hash_device_data *local_device_data = NULL;

	/* Wait until a device is available */
	ret = down_interruptible(&driver_data.device_allocation);
	if (ret)
		return ret;	/* Interrupted */

	/* Select a device */
	klist_iter_init(&driver_data.device_list, &device_iterator);
	device_node = klist_next(&device_iterator);
	while (device_node) {
		local_device_data = container_of(device_node,
					struct hash_device_data, list_node);
		spin_lock(&local_device_data->ctx_lock);
		/* current_ctx allocates a device, NULL = unallocated */
		if (local_device_data->current_ctx) {
			device_node = klist_next(&device_iterator);
		} else {
			local_device_data->current_ctx = ctx;
			ctx->device = local_device_data;
			spin_unlock(&local_device_data->ctx_lock);
			break;
		}
		spin_unlock(&local_device_data->ctx_lock);
	}
	klist_iter_exit(&device_iterator);

	if (!device_node) {
		/*
		 * No free device found.
		 * Since we allocated a device with down_interruptible, this
		 * should not be able to happen.
		 * Number of available devices, which are contained in
		 * device_allocation, is therefore decremented by not doing
		 * an up(device_allocation).
		 */
		return -EBUSY;
	}

	*device_data = local_device_data;

	return 0;
}
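
/*
 * Concurrency scheme, as implemented above (summary, not new behaviour):
 * driver_data.device_allocation counts free engines, so
 * down_interruptible() blocks until one is idle; the klist walk then marks
 * the chosen engine busy by setting its current_ctx. release_hash_device()
 * reverses both steps.
 */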

/**
 * hash_hw_write_key - Writes the key to the hardware registers.
 *
 * @device_data:	Structure for the hash device.
 * @key:		Key to be written.
 * @keylen:		The length of the key.
 *
 * Note! This function DOES NOT write to the NBLW register, even though
 * specified in the hw design spec. Either due to incorrect info in the
 * spec or due to a bug in the hw.
 */
static void hash_hw_write_key(struct hash_device_data *device_data,
			      const u8 *key, unsigned int keylen)
{
	u32 word = 0;
	int nwords = 1;

	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	while (keylen >= 4) {
		u32 *key_word = (u32 *)key;

		HASH_SET_DIN(key_word, nwords);
		keylen -= 4;
		key += 4;
	}

	/* Take care of the remaining bytes in the last word */
	if (keylen) {
		word = 0;
		while (keylen) {
			word |= (key[keylen - 1] << (8 * (keylen - 1)));
			keylen--;
		}

		HASH_SET_DIN(&word, nwords);
	}
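
	/*
	 * Worked example of the tail packing above (illustrative only): a
	 * 3-byte remainder {0xAA, 0xBB, 0xCC} is packed little-endian into
	 * the word, giving word == 0x00CCBBAA.
	 */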

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	HASH_SET_DCAL;

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();
}

/**
 * init_hash_hw - Initialise the hash hardware for a new calculation.
 * @device_data:	Structure for the hash device.
 * @ctx:		The hash context.
 *
 * This function will enable the bits needed to clear and start a new
 * calculation.
 */
static int init_hash_hw(struct hash_device_data *device_data,
			struct hash_ctx *ctx)
{
	int ret = 0;

	ret = hash_setconfiguration(device_data, &ctx->config);
	if (ret) {
		dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
			__func__);
		return ret;
	}

	hash_begin(device_data, ctx);

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
		hash_hw_write_key(device_data, ctx->key, ctx->keylen);

	return ret;
}

/**
 * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
 *
 * @sg:		Scatterlist.
 * @size:	Size in bytes.
 * @aligned:	True if sg data aligned to work in DMA mode.
 */
static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
{
	int nents = 0;
	bool aligned_data = true;

	while (size > 0 && sg) {
		nents++;
		size -= sg->length;

		/* hash_set_dma_transfer will align last nent */
		if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
		    (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
			aligned_data = false;

		sg = sg_next(sg);
	}

	/* hash_set_dma_transfer will align the last nent */
	if (aligned)
		*aligned = aligned_data;

	if (size != 0)
		return -EFAULT;

	return nents;
}

/**
 * hash_dma_valid_data - checks for dma valid sg data.
 * @sg:		Scatterlist.
 * @datasize:	Datasize in bytes.
 *
 * NOTE! This function checks for dma valid sg data, since dma
 * only accepts datasizes of even wordsize.
 */
static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
{
	bool aligned;

	/* Need to include at least one nent, else error */
	if (hash_get_nents(sg, datasize, &aligned) < 1)
		return false;

	return aligned;
}

/**
 * ux500_hash_init - Common hash init function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 *
 * Initialize structures.
 */
static int ux500_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!ctx->key)
		ctx->keylen = 0;

	memset(&req_ctx->state, 0, sizeof(struct hash_state));
	req_ctx->updated = 0;
	if (hash_mode == HASH_MODE_DMA) {
		if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
			req_ctx->dma_mode = false; /* Don't use DMA */

			pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
				 __func__, HASH_DMA_ALIGN_SIZE);
		} else {
			if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
			    hash_dma_valid_data(req->src, req->nbytes)) {
				req_ctx->dma_mode = true;
			} else {
				req_ctx->dma_mode = false;
				pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
					 __func__,
					 HASH_DMA_PERFORMANCE_MIN_SIZE);
			}
		}
	}
	return 0;
}
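
/*
 * Summary of the mode selection above (derived from the code, for
 * reference only):
 *   req->nbytes <  HASH_DMA_ALIGN_SIZE                   -> CPU mode
 *   req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE
 *                  and sg data DMA-aligned               -> DMA mode
 *   anything else                                        -> CPU mode
 */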

/**
 * hash_processblock - This function processes a single block of 512 bits (64
 *                     bytes), word aligned, starting at message.
 * @device_data:	Structure for the hash device.
 * @message:		Block (512 bits) of message to be written to
 *			the HASH hardware.
 * @length:		Message length
 */
static void hash_processblock(struct hash_device_data *device_data,
			      const u32 *message, int length)
{
	int len = length / HASH_BYTES_PER_WORD;
	/*
	 * NBLW bits. Reset the number of bits in last word (NBLW).
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/*
	 * Write message data to the HASH_DIN register.
	 */
	HASH_SET_DIN(message, len);
}

/**
 * hash_messagepad - Pads a message and writes the nblw bits.
 * @device_data:	Structure for the hash device.
 * @message:		Last word of a message.
 * @index_bytes:	The number of bytes in the last message.
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes)
{
	int nwords = 1;

	/*
	 * Clear hash str register, only clear NBLW
	 * since DCAL will be reset by hardware.
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/* Main loop */
	while (index_bytes >= 4) {
		HASH_SET_DIN(message, nwords);
		index_bytes -= 4;
		message++;
	}

	if (index_bytes)
		HASH_SET_DIN(message, nwords);

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
	HASH_SET_NBLW(index_bytes * 8);
	dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
		__func__, readl_relaxed(&device_data->base->din),
		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
	HASH_SET_DCAL;
	dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
		__func__, readl_relaxed(&device_data->base->din),
		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();
}

/**
 * hash_incrementlength - Increments the length of the current message.
 * @ctx:	Hash context.
 * @incr:	Length of message processed already
 *
 * Overflow cannot occur, because conditions for overflow are checked in
 * hash_hw_update.
 */
static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
{
	ctx->state.length.low_word += incr;

	/* Check for wrap-around */
	if (ctx->state.length.low_word < incr)
		ctx->state.length.high_word++;
}

/**
 * hash_setconfiguration - Sets the required configuration for the hash
 *                         hardware.
 * @device_data:	Structure for the hash device.
 * @config:		Pointer to a configuration structure.
 */
int hash_setconfiguration(struct hash_device_data *device_data,
			  struct hash_config *config)
{
	int ret = 0;

	if (config->algorithm != HASH_ALGO_SHA1 &&
	    config->algorithm != HASH_ALGO_SHA256)
		return -EPERM;

	/*
	 * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
	 * to be written to HASH_DIN is considered as 32 bits.
	 */
	HASH_SET_DATA_FORMAT(config->data_format);

	/*
	 * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
	 */
	switch (config->algorithm) {
	case HASH_ALGO_SHA1:
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	case HASH_ALGO_SHA256:
		HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	default:
		dev_err(device_data->dev, "%s: Incorrect algorithm\n",
			__func__);
		return -EPERM;
	}

	/*
	 * MODE bit. This bit selects between HASH or HMAC mode for the
	 * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
	 */
	if (HASH_OPER_MODE_HASH == config->oper_mode)
		HASH_CLEAR_BITS(&device_data->base->cr,
				HASH_CR_MODE_MASK);
	else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
		if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
			/* Truncate key to blocksize */
			dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_LKEY_MASK);
		} else {
			dev_dbg(device_data->dev, "%s: LKEY cleared\n",
				__func__);
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_LKEY_MASK);
		}
	} else {	/* Wrong hash mode */
		ret = -EPERM;
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
	}
	return ret;
}

/**
 * hash_begin - This routine resets some globals and initializes the hash
 *              hardware.
 * @device_data:	Structure for the hash device.
 * @ctx:		Hash context.
 */
void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
	/* HW and SW initializations */
	/* Note: there is no need to initialize buffer and digest members */

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/*
	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
	 * prepare and initialize the HASH accelerator to compute the message
	 * digest of a new message.
	 */
	HASH_INITIALIZE;

	/*
	 * NBLW bits. Reset the number of bits in last word (NBLW).
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}

static int hash_process_data(struct hash_device_data *device_data,
			     struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
			     int msg_length, u8 *data_buffer, u8 *buffer,
			     u8 *index)
{
	int ret = 0;
	u32 count;

	do {
		if ((*index + msg_length) < HASH_BLOCK_SIZE) {
			for (count = 0; count < msg_length; count++) {
				buffer[*index + count] =
					*(data_buffer + count);
			}
			*index += msg_length;
			msg_length = 0;
		} else {
			if (req_ctx->updated) {
				ret = hash_resume_state(device_data,
						&device_data->state);
				memmove(req_ctx->state.buffer,
					device_data->state.buffer,
					HASH_BLOCK_SIZE);
				if (ret) {
					dev_err(device_data->dev,
						"%s: hash_resume_state() failed!\n",
						__func__);
					goto out;
				}
			} else {
				ret = init_hash_hw(device_data, ctx);
				if (ret) {
					dev_err(device_data->dev,
						"%s: init_hash_hw() failed!\n",
						__func__);
					goto out;
				}
				req_ctx->updated = 1;
			}
			/*
			 * If 'data_buffer' is four byte aligned and
			 * local buffer does not have any data, we can
			 * write data directly from 'data_buffer' to
			 * HW peripheral, otherwise we first copy data
			 * to a local buffer.
			 */
			if (IS_ALIGNED((unsigned long)data_buffer, 4) &&
			    (0 == *index))
				hash_processblock(device_data,
						  (const u32 *)data_buffer,
						  HASH_BLOCK_SIZE);
			else {
				for (count = 0;
				     count < (u32)(HASH_BLOCK_SIZE - *index);
				     count++) {
					buffer[*index + count] =
						*(data_buffer + count);
				}
				hash_processblock(device_data,
						  (const u32 *)buffer,
						  HASH_BLOCK_SIZE);
			}
			hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
			data_buffer += (HASH_BLOCK_SIZE - *index);

			msg_length -= (HASH_BLOCK_SIZE - *index);

			*index = 0;

			ret = hash_save_state(device_data,
					      &device_data->state);

			memmove(device_data->state.buffer,
				req_ctx->state.buffer,
				HASH_BLOCK_SIZE);
			if (ret) {
				dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
					__func__);
				goto out;
			}
		}
	} while (msg_length != 0);
out:

	return ret;
}
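
/*
 * Flow of hash_process_data() above, in short (a summary derived from the
 * code): partial input is staged in req_ctx->state.buffer until a full
 * 64-byte block exists, full blocks are fed to hash_processblock(), and
 * the hardware state is saved/resumed around each block so another context
 * can use the engine between update() calls.
 */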

/**
 * hash_dma_final - The hash dma final function for SHA1/SHA256.
 * @req:	The hash request for the job.
 */
static int hash_dma_final(struct ahash_request *req)
{
	int ret = 0;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct hash_device_data *device_data;
	u8 digest[SHA256_DIGEST_SIZE];
	int bytes_written = 0;

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
		(unsigned long)ctx);

	if (req_ctx->updated) {
		ret = hash_resume_state(device_data, &device_data->state);

		if (ret) {
			dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
				__func__);
			goto out;
		}
	}

	if (!req_ctx->updated) {
		ret = hash_setconfiguration(device_data, &ctx->config);
		if (ret) {
			dev_err(device_data->dev,
				"%s: hash_setconfiguration() failed!\n",
				__func__);
			goto out;
		}

		/* Enable DMA input */
		if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_DMAE_MASK);
		} else {
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_DMAE_MASK);
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_PRIVN_MASK);
		}

		HASH_INITIALIZE;

		if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
			hash_hw_write_key(device_data, ctx->key, ctx->keylen);

		/* Number of bits in last word = (nbytes * 8) % 32 */
		HASH_SET_NBLW((req->nbytes * 8) % 32);
		req_ctx->updated = 1;
	}

	/* Store the nents in the dma struct. */
	ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
	if (!ctx->device->dma.nents) {
		dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n",
			__func__);
		ret = ctx->device->dma.nents;
		goto out;
	}

	bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
	if (bytes_written != req->nbytes) {
		dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
			__func__);
		ret = bytes_written;
		goto out;
	}

	wait_for_completion(&ctx->device->dma.complete);
	hash_dma_done(ctx);

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
		unsigned int keylen = ctx->keylen;
		u8 *key = ctx->key;

		dev_dbg(device_data->dev, "%s: keylen: %d\n",
			__func__, ctx->keylen);
		hash_hw_write_key(device_data, key, keylen);
	}

	hash_get_digest(device_data, digest, ctx->config.algorithm);
	memcpy(req->result, digest, ctx->digestsize);

out:
	release_hash_device(device_data);

	/*
	 * Allocated in setkey, and only used in HMAC.
	 */
	kfree(ctx->key);

	return ret;
}

/**
 * hash_hw_final - The final hash calculation function.
 * @req:	The hash request for the job.
 */
static int hash_hw_final(struct ahash_request *req)
{
	int ret = 0;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct hash_device_data *device_data;
	u8 digest[SHA256_DIGEST_SIZE];

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
		(unsigned long)ctx);

	if (req_ctx->updated) {
		ret = hash_resume_state(device_data, &device_data->state);

		if (ret) {
			dev_err(device_data->dev,
				"%s: hash_resume_state() failed!\n", __func__);
			goto out;
		}
	} else if (req->nbytes == 0 && ctx->keylen == 0) {
		u8 zero_hash[SHA256_DIGEST_SIZE];
		u32 zero_hash_size = 0;
		bool zero_digest = false;
		/*
		 * Use a pre-calculated empty message digest
		 * (workaround since hw return zeroes, hw bug!?)
		 */
		ret = get_empty_message_digest(device_data, &zero_hash[0],
					       &zero_hash_size, &zero_digest);
		if (!ret && likely(zero_hash_size == ctx->digestsize) &&
		    zero_digest) {
			memcpy(req->result, &zero_hash[0], ctx->digestsize);
			goto out;
		} else if (!ret && !zero_digest) {
			dev_dbg(device_data->dev,
				"%s: HMAC zero msg with key, continue...\n",
				__func__);
		} else {
			dev_err(device_data->dev,
				"%s: ret=%d, or wrong digest size? %s\n",
				__func__, ret,
				zero_hash_size == ctx->digestsize ?
				"true" : "false");
			/* Return error */
			goto out;
		}
	} else if (req->nbytes == 0 && ctx->keylen > 0) {
		ret = -EPERM;
		dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
			__func__);
		goto out;
	}

	if (!req_ctx->updated) {
		ret = init_hash_hw(device_data, ctx);
		if (ret) {
			dev_err(device_data->dev,
				"%s: init_hash_hw() failed!\n", __func__);
			goto out;
		}
	}

	if (req_ctx->state.index) {
		hash_messagepad(device_data, req_ctx->state.buffer,
				req_ctx->state.index);
	} else {
		HASH_SET_DCAL;
		while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
			cpu_relax();
	}

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
		unsigned int keylen = ctx->keylen;
		u8 *key = ctx->key;

		dev_dbg(device_data->dev, "%s: keylen: %d\n",
			__func__, ctx->keylen);
		hash_hw_write_key(device_data, key, keylen);
	}

	hash_get_digest(device_data, digest, ctx->config.algorithm);
	memcpy(req->result, digest, ctx->digestsize);

out:
	release_hash_device(device_data);

	/*
	 * Allocated in setkey, and only used in HMAC.
	 */
	kfree(ctx->key);

	return ret;
}

/**
 * hash_hw_update - Updates current HASH computation hashing another part of
 *                  the message.
 * @req:	Byte array containing the message to be hashed (caller
 *		allocated).
 */
int hash_hw_update(struct ahash_request *req)
{
	int ret = 0;
	u8 index = 0;
	u8 *buffer;
	struct hash_device_data *device_data;
	u8 *data_buffer;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_hash_walk walk;
	int msg_length = 0;

	index = req_ctx->state.index;
	buffer = (u8 *)req_ctx->state.buffer;

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	msg_length = crypto_hash_walk_first(req, &walk);

	/* Empty message ("") is correct indata */
	if (msg_length == 0) {
		ret = 0;
		goto release_dev;
	}

	/* Check if ctx->state.length + msg_length overflows */
	if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
	    HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
		pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
		ret = crypto_hash_walk_done(&walk, -EPERM);
		goto release_dev;
	}

	/* Main loop */
	while (0 != msg_length) {
		data_buffer = walk.data;
		ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
					data_buffer, buffer, &index);

		if (ret) {
			dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
				__func__);
			crypto_hash_walk_done(&walk, ret);
			goto release_dev;
		}

		msg_length = crypto_hash_walk_done(&walk, 0);
	}

	req_ctx->state.index = index;
	dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
		__func__, req_ctx->state.index, req_ctx->state.bit_index);

release_dev:
	release_hash_device(device_data);

	return ret;
}

/**
 * hash_resume_state - Function that resumes the state of a calculation.
 * @device_data:	Pointer to the device structure.
 * @device_state:	The state to be restored in the hash hardware.
 */
int hash_resume_state(struct hash_device_data *device_data,
		      const struct hash_state *device_state)
{
	u32 temp_cr;
	s32 count;
	int hash_mode = HASH_OPER_MODE_HASH;

	if (NULL == device_state) {
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
		return -EPERM;
	}

	/* Check correctness of index and length members */
	if (device_state->index > HASH_BLOCK_SIZE ||
	    (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
		return -EPERM;
	}

	/*
	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
	 * prepare and initialize the HASH accelerator to compute the message
	 * digest of a new message.
	 */
	HASH_INITIALIZE;

	temp_cr = device_state->temp_cr;
	writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);

	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
		hash_mode = HASH_OPER_MODE_HMAC;
	else
		hash_mode = HASH_OPER_MODE_HASH;

	for (count = 0; count < HASH_CSR_COUNT; count++) {
		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
			break;

		writel_relaxed(device_state->csr[count],
			       &device_data->base->csrx[count]);
	}

	writel_relaxed(device_state->csfull, &device_data->base->csfull);
	writel_relaxed(device_state->csdatain, &device_data->base->csdatain);

	writel_relaxed(device_state->str_reg, &device_data->base->str);
	writel_relaxed(temp_cr, &device_data->base->cr);

	return 0;
}

/**
 * hash_save_state - Function that saves the state of hardware.
 * @device_data:	Pointer to the device structure.
 * @device_state:	The structure where the hardware state should be saved.
 */
int hash_save_state(struct hash_device_data *device_data,
		    struct hash_state *device_state)
{
	u32 temp_cr;
	u32 count;
	int hash_mode = HASH_OPER_MODE_HASH;

	if (NULL == device_state) {
		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
			__func__);
		return -EPERM;
	}

	/* Write dummy value to force digest intermediate calculation. This
	 * actually makes sure that there isn't any ongoing calculation in the
	 * hardware.
	 */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	temp_cr = readl_relaxed(&device_data->base->cr);

	device_state->str_reg = readl_relaxed(&device_data->base->str);

	device_state->din_reg = readl_relaxed(&device_data->base->din);

	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
		hash_mode = HASH_OPER_MODE_HMAC;
	else
		hash_mode = HASH_OPER_MODE_HASH;

	for (count = 0; count < HASH_CSR_COUNT; count++) {
		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
			break;

		device_state->csr[count] =
			readl_relaxed(&device_data->base->csrx[count]);
	}

	device_state->csfull = readl_relaxed(&device_data->base->csfull);
	device_state->csdatain = readl_relaxed(&device_data->base->csdatain);

	device_state->temp_cr = temp_cr;

	return 0;
}

/**
 * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
 * @device_data:	Structure for the hash device.
 */
int hash_check_hw(struct hash_device_data *device_data)
{
	/* Checking Peripheral Ids */
	if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
	    HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
	    HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
	    HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
	    HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
	    HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
	    HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
	    HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
		return 0;
	}

	dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
	return -ENOTSUPP;
}

/**
 * hash_get_digest - Gets the digest.
 * @device_data:	Pointer to the device structure.
 * @digest:		User allocated byte array for the calculated digest.
 * @algorithm:		The algorithm in use.
 */
void hash_get_digest(struct hash_device_data *device_data,
		     u8 *digest, int algorithm)
{
	u32 temp_hx_val, count;
	int loop_ctr;

	if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
		dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
			__func__, algorithm);
		return;
	}

	if (algorithm == HASH_ALGO_SHA1)
		loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
	else
		loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);

	dev_dbg(device_data->dev, "%s: digest array:(0x%lx)\n",
		__func__, (unsigned long)digest);
	/* Copy result into digest array */
	for (count = 0; count < loop_ctr; count++) {
		temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
		digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
		digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
		digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
		digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
	}
}

/**
 * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 */
static int ahash_update(struct ahash_request *req)
{
	int ret = 0;
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
		ret = hash_hw_update(req);
	/* Skip update for DMA, all data will be passed to DMA in final */

	if (ret)
		pr_err("%s: hash_hw_update() failed!\n", __func__);

	return ret;
}

/**
 * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
 * @req:	The hash request for the job.
 */
static int ahash_final(struct ahash_request *req)
{
	int ret = 0;
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	pr_debug("%s: data size: %d\n", __func__, req->nbytes);

	if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
		ret = hash_dma_final(req);
	else
		ret = hash_hw_final(req);

	if (ret)
		pr_err("%s: hash_hw/dma_final() failed\n", __func__);

	return ret;
}

static int hash_setkey(struct crypto_ahash *tfm,
		       const u8 *key, unsigned int keylen, int alg)
{
	int ret = 0;
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/*
	 * Freed in final.
	 */
	ctx->key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key) {
		pr_err("%s: Failed to allocate ctx->key for %d\n",
		       __func__, alg);
		return -ENOMEM;
	}
	ctx->keylen = keylen;

	return ret;
}

static int ahash_sha1_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA1;
	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
	ctx->digestsize = SHA1_DIGEST_SIZE;

	return ux500_hash_init(req);
}

static int ahash_sha256_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA256;
	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
	ctx->digestsize = SHA256_DIGEST_SIZE;

	return ux500_hash_init(req);
}

static int ahash_sha1_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = ahash_sha1_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int ahash_sha256_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = ahash_sha256_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int ahash_noimport(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int ahash_noexport(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int hmac_sha1_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA1;
	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
	ctx->digestsize = SHA1_DIGEST_SIZE;

	return ux500_hash_init(req);
}

static int hmac_sha256_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA256;
	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
	ctx->digestsize = SHA256_DIGEST_SIZE;

	return ux500_hash_init(req);
}

static int hmac_sha1_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = hmac_sha1_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha256_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = hmac_sha256_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha1_setkey(struct crypto_ahash *tfm,
			    const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}

static int hmac_sha256_setkey(struct crypto_ahash *tfm,
			      const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}

struct hash_algo_template {
	struct hash_config conf;
	struct ahash_alg hash;
};

static int hash_cra_init(struct crypto_tfm *tfm)
{
	struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct hash_algo_template *hash_alg;

	hash_alg = container_of(__crypto_ahash_alg(alg),
				struct hash_algo_template,
				hash);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct hash_req_ctx));

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = hash_alg->conf.algorithm;
	ctx->config.oper_mode = hash_alg->conf.oper_mode;

	ctx->digestsize = hash_alg->hash.halg.digestsize;

	return 0;
}

static struct hash_algo_template hash_algs[] = {
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = ux500_hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha1_digest,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = ux500_hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha256_digest,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = ux500_hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha1_digest,
			.setkey = hmac_sha1_setkey,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = ux500_hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha256_digest,
			.setkey = hmac_sha256_setkey,
			.export = ahash_noexport,
			.import = ahash_noimport,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-ux500",
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

static int ahash_algs_register_all(struct hash_device_data *device_data)
{
	int ret;
	int i;
	int count;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
		ret = crypto_register_ahash(&hash_algs[i].hash);
		if (ret) {
			count = i;
			dev_err(device_data->dev, "%s: alg registration failed\n",
				hash_algs[i].hash.halg.base.cra_driver_name);
			goto unreg;
		}
	}
	return 0;
unreg:
	for (i = 0; i < count; i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
	return ret;
}

static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
}

/**
 * ux500_hash_probe - Function that probes the hash hardware.
 * @pdev:	The platform device.
 */
static int ux500_hash_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res = NULL;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
	if (!device_data) {
		ret = -ENOMEM;
		goto out;
	}

	device_data->dev = dev;
	device_data->current_ctx = NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	device_data->phybase = res->start;
	device_data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(device_data->base)) {
		ret = PTR_ERR(device_data->base);
		goto out;
	}
	spin_lock_init(&device_data->ctx_lock);
	spin_lock_init(&device_data->power_state_lock);

	/* Enable power for HASH1 hardware block */
	device_data->regulator = regulator_get(dev, "v-ape");
	if (IS_ERR(device_data->regulator)) {
		dev_err(dev, "%s: regulator_get() failed!\n", __func__);
		ret = PTR_ERR(device_data->regulator);
		device_data->regulator = NULL;
		goto out;
	}

	/* Enable the clock for HASH1 hardware block */
	device_data->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(device_data->clk)) {
		dev_err(dev, "%s: clk_get() failed!\n", __func__);
		ret = PTR_ERR(device_data->clk);
		goto out_regulator;
	}

	ret = clk_prepare(device_data->clk);
	if (ret) {
		dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
		goto out_regulator;
	}

	/* Enable device power (and clock) */
	ret = hash_enable_power(device_data, false);
	if (ret) {
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
		goto out_clk_unprepare;
	}

	ret = hash_check_hw(device_data);
	if (ret) {
		dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
		goto out_power;
	}

	if (hash_mode == HASH_MODE_DMA)
		hash_dma_setup_channel(device_data, dev);

	platform_set_drvdata(pdev, device_data);

	/* Put the new device into the device list... */
	klist_add_tail(&device_data->list_node, &driver_data.device_list);
	/* ... and signal that a new device is available. */
	up(&driver_data.device_allocation);

	ret = ahash_algs_register_all(device_data);
	if (ret) {
		dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
			__func__);
		goto out_power;
	}

	dev_info(dev, "successfully registered\n");

	return 0;

out_power:
	hash_disable_power(device_data, false);

out_clk_unprepare:
	clk_unprepare(device_data->clk);

out_regulator:
	regulator_put(device_data->regulator);

out:
	return ret;
}

/**
 * ux500_hash_remove - Function that removes the hash device from the platform.
 * @pdev:	The platform device.
 */
static int ux500_hash_remove(struct platform_device *pdev)
{
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	/* Try to decrease the number of available devices. */
	if (down_trylock(&driver_data.device_allocation))
		return -EBUSY;

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (device_data->current_ctx) {
		/* The device is busy */
		spin_unlock(&device_data->ctx_lock);
		/* Return the device to the pool. */
		up(&driver_data.device_allocation);
		return -EBUSY;
	}

	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(dev, "%s: hash_disable_power() failed\n",
			__func__);

	clk_unprepare(device_data->clk);
	regulator_put(device_data->regulator);

	return 0;
}

/**
 * ux500_hash_shutdown - Function that shuts down the hash device.
 * @pdev:	The platform device
 */
static void ux500_hash_shutdown(struct platform_device *pdev)
{
	struct hash_device_data *device_data;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
			__func__);
		return;
	}

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (!device_data->current_ctx) {
		if (down_trylock(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n",
				__func__);
		/*
		 * (Allocate the device)
		 * Need to set this to non-null (dummy) value,
		 * to avoid its usage during context switching.
		 */
		device_data->current_ctx++;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
			__func__);
}

#ifdef CONFIG_PM_SLEEP
/**
 * ux500_hash_suspend - Function that suspends the hash device.
 * @dev:	Device to suspend.
 */
static int ux500_hash_suspend(struct device *dev)
{
	int ret;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx)
		device_data->current_ctx++;
	spin_unlock(&device_data->ctx_lock);

	if (device_data->current_ctx == ++temp_ctx) {
		if (down_interruptible(&driver_data.device_allocation))
			dev_dbg(dev, "%s: down_interruptible() failed\n",
				__func__);
		ret = hash_disable_power(device_data, false);

	} else {
		ret = hash_disable_power(device_data, true);
	}

	if (ret)
		dev_err(dev, "%s: hash_disable_power() failed\n", __func__);

	return ret;
}

/**
 * ux500_hash_resume - Function that resumes the hash device.
 * @dev:	Device to resume.
 */
static int ux500_hash_resume(struct device *dev)
{
	int ret = 0;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	spin_lock(&device_data->ctx_lock);
	if (device_data->current_ctx == ++temp_ctx)
		device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	if (!device_data->current_ctx)
		up(&driver_data.device_allocation);

	ret = hash_enable_power(device_data, true);
	if (ret)
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);

	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);

static const struct of_device_id ux500_hash_match[] = {
	{ .compatible = "stericsson,ux500-hash" },
	{ },
};
MODULE_DEVICE_TABLE(of, ux500_hash_match);

static struct platform_driver hash_driver = {
	.probe  = ux500_hash_probe,
	.remove = ux500_hash_remove,
	.shutdown = ux500_hash_shutdown,
	.driver = {
		.name  = "hash1",
		.of_match_table = ux500_hash_match,
		.pm = &ux500_hash_pm,
	}
};

/**
 * ux500_hash_mod_init - The kernel module init function.
 */
static int __init ux500_hash_mod_init(void)
{
	klist_init(&driver_data.device_list, NULL, NULL);
	/* Initialize the semaphore to 0 devices (locked state) */
	sema_init(&driver_data.device_allocation, 0);

	return platform_driver_register(&hash_driver);
}

/**
 * ux500_hash_mod_fini - The kernel module exit function.
 */
static void __exit ux500_hash_mod_fini(void)
{
	platform_driver_unregister(&hash_driver);
}

module_init(ux500_hash_mod_init);
module_exit(ux500_hash_mod_fini);

MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");

MODULE_ALIAS_CRYPTO("sha1-all");
MODULE_ALIAS_CRYPTO("sha256-all");
MODULE_ALIAS_CRYPTO("hmac-sha1-all");
MODULE_ALIAS_CRYPTO("hmac-sha256-all");
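
/*
 * Usage sketch (illustrative only, not part of the driver): once this
 * module is loaded, the algorithms above are reachable through the generic
 * crypto API. A minimal one-shot digest, with error handling trimmed and
 * caller-provided data/len, could look like:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	u8 out[SHA256_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, out, len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */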