3 * Support for Nomadik hardware crypto engine.
5 * Copyright (C) ST-Ericsson SA 2010
6 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
7 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
8 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
9 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
10 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
11 * License terms: GNU General Public License (GPL) version 2
14 #define pr_fmt(fmt) "hashX hashX: " fmt
16 #include <linux/clk.h>
17 #include <linux/device.h>
18 #include <linux/err.h>
19 #include <linux/init.h>
21 #include <linux/klist.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/mod_devicetable.h>
25 #include <linux/platform_device.h>
26 #include <linux/crypto.h>
28 #include <linux/regulator/consumer.h>
29 #include <linux/dmaengine.h>
30 #include <linux/bitops.h>
32 #include <crypto/internal/hash.h>
33 #include <crypto/sha.h>
34 #include <crypto/scatterwalk.h>
35 #include <crypto/algapi.h>
37 #include <linux/platform_data/crypto-ux500.h>
/* Module parameter selecting the data path: CPU = 0 (default), DMA = 1.
 * NOTE(review): the backing "static int hash_mode;" definition is not
 * visible in this chunk — confirm it exists earlier in the file. */
42 module_param(hash_mode, int, 0);
43 MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
45 /* HMAC-SHA1, no key */
/* Pre-computed HMAC-SHA1 digest of the empty message with a zero-length
 * key; copied out by get_empty_message_digest() as a workaround because
 * the hardware returns zeroes for empty input. */
46 static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
47 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
48 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
49 0x70, 0x69, 0x0e, 0x1d
52 /* HMAC-SHA256, no key */
/* Pre-computed HMAC-SHA256 digest of the empty message with a
 * zero-length key; used by get_empty_message_digest(). */
53 static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
54 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
55 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
56 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
57 0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
61 * struct hash_driver_data - data specific to the driver.
63 * @device_list: A list of registered devices to choose from.
64 * @device_allocation: A semaphore initialized with number of devices.
66 struct hash_driver_data {
67 struct klist device_list;
68 struct semaphore device_allocation;
/* Single driver-wide instance; devices are allocated from it in
 * hash_get_device_data() and released in release_hash_device(). */
71 static struct hash_driver_data driver_data;
73 /* Declaration of functions */
/* Forward declaration — the definition (with identical kernel-doc)
 * appears later in this file. */
75 * hash_messagepad - Pads a message and write the nblw bits.
76 * @device_data: Structure for the hash device.
77 * @message: Last word of a message
78 * @index_bytes: The number of bytes in the last message
80 * This function manages the final part of the digest calculation, when less
81 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
84 static void hash_messagepad(struct hash_device_data *device_data,
85 const u32 *message, u8 index_bytes);
88 * release_hash_device - Releases a previously allocated hash device.
89 * @device_data: Structure for the hash device.
92 static void release_hash_device(struct hash_device_data *device_data)
/* Unbind the context under the ctx lock so a concurrent
 * hash_get_device_data() sees current_ctx == NULL (device free). */
94 spin_lock(&device_data->ctx_lock);
95 device_data->current_ctx->device = NULL;
96 device_data->current_ctx = NULL;
97 spin_unlock(&device_data->ctx_lock);
/* NOTE(review): comment mentions cryp_get_device_data; the matching
 * down_interruptible() appears to be in hash_get_device_data() below —
 * confirm (looks copied from the companion cryp driver). */
100 * The down_interruptible part for this semaphore is called in
101 * cryp_get_device_data.
103 up(&driver_data.device_allocation);
/* Configure the mem-to-device DMA channel used to feed the hash FIFO.
 * NOTE(review): the second parameter (a struct device *dev, judging by
 * the dev->platform_data use below) is not visible in this chunk. */
106 static void hash_dma_setup_channel(struct hash_device_data *device_data,
109 struct hash_platform_data *platform_data = dev->platform_data;
110 struct dma_slave_config conf = {
111 .direction = DMA_MEM_TO_DEV,
112 .dst_addr = device_data->phybase + HASH_DMA_FIFO,
113 .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
/* Request a slave channel matched by the platform's filter function. */
117 dma_cap_zero(device_data->dma.mask);
118 dma_cap_set(DMA_SLAVE, device_data->dma.mask);
120 device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
121 device_data->dma.chan_mem2hash =
122 dma_request_channel(device_data->dma.mask,
123 platform_data->dma_filter,
124 device_data->dma.cfg_mem2hash);
126 dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);
/* Completed by hash_dma_callback(), waited on in hash_dma_final(). */
128 init_completion(&device_data->dma.complete);
131 static void hash_dma_callback(void *data)
133 struct hash_ctx *ctx = data;
135 complete(&ctx->device->dma.complete);
/* Map the scatterlist and submit a mem-to-device slave transfer on the
 * mem2hash channel. Only DMA_TO_DEVICE is accepted. */
138 static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
139 int len, enum dma_data_direction direction)
141 struct dma_async_tx_descriptor *desc = NULL;
142 struct dma_chan *channel = NULL;
145 if (direction != DMA_TO_DEVICE) {
146 dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
/* Round the last nent up so its length is DMA-alignment sized. */
151 sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);
153 channel = ctx->device->dma.chan_mem2hash;
154 ctx->device->dma.sg = sg;
155 ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
156 ctx->device->dma.sg, ctx->device->dma.nents,
159 if (!ctx->device->dma.sg_len) {
160 dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
165 dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
167 desc = dmaengine_prep_slave_sg(channel,
168 ctx->device->dma.sg, ctx->device->dma.sg_len,
169 DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
171 dev_err(ctx->device->dev,
172 "%s: dmaengine_prep_slave_sg() failed!\n", __func__);
/* Completion is signalled from hash_dma_callback(). */
176 desc->callback = hash_dma_callback;
177 desc->callback_param = ctx;
179 cookie = dmaengine_submit(desc);
180 dma_async_issue_pending(channel);
185 static void hash_dma_done(struct hash_ctx *ctx)
187 struct dma_chan *chan;
189 chan = ctx->device->dma.chan_mem2hash;
190 dmaengine_terminate_all(chan);
191 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
192 ctx->device->dma.sg_len, DMA_TO_DEVICE);
195 static int hash_dma_write(struct hash_ctx *ctx,
196 struct scatterlist *sg, int len)
198 int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
200 dev_dbg(ctx->device->dev,
201 "%s: hash_set_dma_transfer() failed\n", __func__);
209 * get_empty_message_digest - Returns a pre-calculated digest for
211 * @device_data: Structure for the hash device.
212 * @zero_hash: Buffer to return the empty message digest.
213 * @zero_hash_size: Hash size of the empty message digest.
214 * @zero_digest: True if zero_digest returned.
/* Workaround: the hardware returns zeroes for an empty message, so a
 * canned digest is copied out instead (plain hash or keyless HMAC). */
216 static int get_empty_message_digest(
217 struct hash_device_data *device_data,
218 u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
221 struct hash_ctx *ctx = device_data->current_ctx;
222 *zero_digest = false;
225 * Caller responsible for ctx != NULL.
228 if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
229 if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
230 memcpy(zero_hash, &sha1_zero_message_hash[0],
232 *zero_hash_size = SHA1_DIGEST_SIZE;
234 } else if (HASH_ALGO_SHA256 ==
235 ctx->config.algorithm) {
236 memcpy(zero_hash, &sha256_zero_message_hash[0],
238 *zero_hash_size = SHA256_DIGEST_SIZE;
241 dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
246 } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
/* HMAC with no key: the tables above hold the canned digests. */
248 if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
249 memcpy(zero_hash, &zero_message_hmac_sha1[0],
251 *zero_hash_size = SHA1_DIGEST_SIZE;
253 } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
254 memcpy(zero_hash, &zero_message_hmac_sha256[0],
256 *zero_hash_size = SHA256_DIGEST_SIZE;
259 dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
/* HMAC with a key present: fall through to a normal hw calculation. */
265 dev_dbg(device_data->dev,
266 "%s: Continue hash calculation, since hmac key available\n",
276 * hash_disable_power - Request to disable power and clock.
277 * @device_data: Structure for the hash device.
278 * @save_device_state: If true, saves the current hw state.
280 * This function request for disabling power (regulator) and clock,
281 * and could also save current hw state.
283 static int hash_disable_power(struct hash_device_data *device_data,
284 bool save_device_state)
287 struct device *dev = device_data->dev;
/* power_state_lock serialises against hash_enable_power(); the early
 * exit below makes repeated disables idempotent. */
289 spin_lock(&device_data->power_state_lock);
290 if (!device_data->power_state)
293 if (save_device_state) {
294 hash_save_state(device_data,
295 &device_data->state);
296 device_data->restore_dev_state = true;
/* Clock off first, then regulator; mirrors the enable order. */
299 clk_disable(device_data->clk);
300 ret = regulator_disable(device_data->regulator);
302 dev_err(dev, "%s: regulator_disable() failed!\n", __func__);
304 device_data->power_state = false;
307 spin_unlock(&device_data->power_state_lock);
313 * hash_enable_power - Request to enable power and clock.
314 * @device_data: Structure for the hash device.
315 * @restore_device_state: If true, restores a previous saved hw state.
317 * This function request for enabling power (regulator) and clock,
318 * and could also restore a previously saved hw state.
320 static int hash_enable_power(struct hash_device_data *device_data,
321 bool restore_device_state)
324 struct device *dev = device_data->dev;
326 spin_lock(&device_data->power_state_lock);
327 if (!device_data->power_state) {
328 ret = regulator_enable(device_data->regulator);
330 dev_err(dev, "%s: regulator_enable() failed!\n",
334 ret = clk_enable(device_data->clk);
/* Unwind the regulator if the clock fails to enable. */
336 dev_err(dev, "%s: clk_enable() failed!\n", __func__);
337 ret = regulator_disable(
338 device_data->regulator);
341 device_data->power_state = true;
/* Restore the state saved by hash_disable_power(), at most once. */
344 if (device_data->restore_dev_state) {
345 if (restore_device_state) {
346 device_data->restore_dev_state = false;
347 hash_resume_state(device_data, &device_data->state);
351 spin_unlock(&device_data->power_state_lock);
357 * hash_get_device_data - Checks for an available hash device and return it.
358 * @hash_ctx: Structure for the hash context.
359 * @device_data: Structure for the hash device.
361 * This function check for an available hash device and return it to
363 * Note! Caller need to release the device, calling up().
365 static int hash_get_device_data(struct hash_ctx *ctx,
366 struct hash_device_data **device_data)
369 struct klist_iter device_iterator;
370 struct klist_node *device_node;
371 struct hash_device_data *local_device_data = NULL;
373 /* Wait until a device is available */
374 ret = down_interruptible(&driver_data.device_allocation);
376 return ret; /* Interrupted */
378 /* Select a device */
379 klist_iter_init(&driver_data.device_list, &device_iterator);
380 device_node = klist_next(&device_iterator);
381 while (device_node) {
382 local_device_data = container_of(device_node,
383 struct hash_device_data, list_node);
384 spin_lock(&local_device_data->ctx_lock);
385 /* current_ctx allocates a device, NULL = unallocated */
386 if (local_device_data->current_ctx) {
387 device_node = klist_next(&device_iterator);
/* Free device found: claim it under the ctx lock. */
389 local_device_data->current_ctx = ctx;
390 ctx->device = local_device_data;
391 spin_unlock(&local_device_data->ctx_lock);
394 spin_unlock(&local_device_data->ctx_lock);
396 klist_iter_exit(&device_iterator);
400 * No free device found.
401 * Since we allocated a device with down_interruptible, this
402 * should not be able to happen.
403 * Number of available devices, which are contained in
404 * device_allocation, is therefore decremented by not doing
405 * an up(device_allocation).
410 *device_data = local_device_data;
416 * hash_hw_write_key - Writes the key to the hardware registries.
418 * @device_data: Structure for the hash device.
419 * @key: Key to be written.
420 * @keylen: The length of the key.
422 * Note! This function DOES NOT write to the NBLW registry, even though
423 * specified in the hw design spec. Either due to incorrect info in the
424 * spec or due to a bug in the hw.
426 static void hash_hw_write_key(struct hash_device_data *device_data,
427 const u8 *key, unsigned int keylen)
432 HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
/* Write the key one 32-bit word at a time. */
434 while (keylen >= 4) {
435 u32 *key_word = (u32 *)key;
437 HASH_SET_DIN(key_word, nwords);
442 /* Take care of the remaining bytes in the last word */
446 word |= (key[keylen - 1] << (8 * (keylen - 1)));
450 HASH_SET_DIN(&word, nwords);
/* Busy-wait until the hw finishes the digest calculation (DCAL). */
453 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
458 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
463 * init_hash_hw - Initialise the hash hardware for a new calculation.
464 * @device_data: Structure for the hash device.
465 * @ctx: The hash context.
467 * This function will enable the bits needed to clear and start a new
470 static int init_hash_hw(struct hash_device_data *device_data,
471 struct hash_ctx *ctx)
475 ret = hash_setconfiguration(device_data, &ctx->config);
477 dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
482 hash_begin(device_data, ctx);
/* In HMAC mode the key must be loaded before any message data. */
484 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
485 hash_hw_write_key(device_data, ctx->key, ctx->keylen);
491 * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
494 * @size: Size in bytes.
495 * @aligned: True if sg data aligned to work in DMA mode.
/* @aligned may be NULL when the caller only needs the nent count
 * (see the hash_dma_final() call site). */
498 static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
501 bool aligned_data = true;
503 while (size > 0 && sg) {
507 /* hash_set_dma_transfer will align last nent */
508 if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
509 (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
510 aligned_data = false;
516 *aligned = aligned_data;
525 * hash_dma_valid_data - checks for dma valid sg data.
527 * @datasize: Datasize in bytes.
529 * NOTE! This function checks for dma valid sg data, since dma
530 * only accept datasizes of even wordsize.
532 static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
536 /* Need to include at least one nent, else error */
537 if (hash_get_nents(sg, datasize, &aligned) < 1)
544 * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
545 * @req: The hash request for the job.
547 * Initialize structures.
549 static int hash_init(struct ahash_request *req)
551 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
552 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
553 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
558 memset(&req_ctx->state, 0, sizeof(struct hash_state));
559 req_ctx->updated = 0;
/* Decide per-request whether DMA is worthwhile: data must be at least
 * HASH_DMA_ALIGN_SIZE, big enough to beat CPU mode, and sg-aligned. */
560 if (hash_mode == HASH_MODE_DMA) {
561 if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
562 req_ctx->dma_mode = false; /* Don't use DMA */
564 pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
565 __func__, HASH_DMA_ALIGN_SIZE);
567 if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
568 hash_dma_valid_data(req->src, req->nbytes)) {
569 req_ctx->dma_mode = true;
571 req_ctx->dma_mode = false;
572 pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
574 HASH_DMA_PERFORMANCE_MIN_SIZE);
582 * hash_processblock - This function processes a single block of 512 bits (64
583 * bytes), word aligned, starting at message.
584 * @device_data: Structure for the hash device.
585 * @message: Block (512 bits) of message to be written to
589 static void hash_processblock(struct hash_device_data *device_data,
590 const u32 *message, int length)
592 int len = length / HASH_BYTES_PER_WORD;
594 * NBLW bits. Reset the number of bits in last word (NBLW).
596 HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
599 * Write message data to the HASH_DIN register.
601 HASH_SET_DIN(message, len);
605 * hash_messagepad - Pads a message and write the nblw bits.
606 * @device_data: Structure for the hash device.
607 * @message: Last word of a message.
608 * @index_bytes: The number of bytes in the last message.
610 * This function manages the final part of the digest calculation, when less
611 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
614 static void hash_messagepad(struct hash_device_data *device_data,
615 const u32 *message, u8 index_bytes)
620 * Clear hash str register, only clear NBLW
621 * since DCAL will be reset by hardware.
623 HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
/* Write whole words of the tail, then the final (partial) word. */
626 while (index_bytes >= 4) {
627 HASH_SET_DIN(message, nwords);
633 HASH_SET_DIN(message, nwords);
635 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
638 /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
639 HASH_SET_NBLW(index_bytes * 8);
640 dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
641 __func__, readl_relaxed(&device_data->base->din),
642 readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
644 dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
645 __func__, readl_relaxed(&device_data->base->din),
646 readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
/* Wait until the hardware has finished the digest calculation. */
648 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
653 * hash_incrementlength - Increments the length of the current message.
655 * @incr: Length of message processed already
657 * Overflow cannot occur, because conditions for overflow are checked in
660 static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
662 ctx->state.length.low_word += incr;
664 /* Check for wrap-around */
665 if (ctx->state.length.low_word < incr)
666 ctx->state.length.high_word++;
670 * hash_setconfiguration - Sets the required configuration for the hash
672 * @device_data: Structure for the hash device.
673 * @config: Pointer to a configuration structure.
675 int hash_setconfiguration(struct hash_device_data *device_data,
676 struct hash_config *config)
680 if (config->algorithm != HASH_ALGO_SHA1 &&
681 config->algorithm != HASH_ALGO_SHA256)
685 * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
686 * to be written to HASH_DIN is considered as 32 bits.
688 HASH_SET_DATA_FORMAT(config->data_format);
691 * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
693 switch (config->algorithm) {
695 HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
698 case HASH_ALGO_SHA256:
699 HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
/* Unreachable after the check above, but kept as a safety net. */
703 dev_err(device_data->dev, "%s: Incorrect algorithm\n",
709 * MODE bit. This bit selects between HASH or HMAC mode for the
710 * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
712 if (HASH_OPER_MODE_HASH == config->oper_mode)
713 HASH_CLEAR_BITS(&device_data->base->cr,
715 else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
716 HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
/* LKEY flags a key longer than one block so the hw handles it. */
717 if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
718 /* Truncate key to blocksize */
719 dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
720 HASH_SET_BITS(&device_data->base->cr,
723 dev_dbg(device_data->dev, "%s: LKEY cleared\n",
725 HASH_CLEAR_BITS(&device_data->base->cr,
728 } else { /* Wrong hash mode */
730 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
737 * hash_begin - This routine resets some globals and initializes the hash
739 * @device_data: Structure for the hash device.
740 * @ctx: Hash context.
742 void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
744 /* HW and SW initializations */
745 /* Note: there is no need to initialize buffer and digest members */
/* Wait for any previous digest calculation (DCAL) to finish. */
747 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
751 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
752 * prepare the initialize the HASH accelerator to compute the message
753 * digest of a new message.
758 * NBLW bits. Reset the number of bits in last word (NBLW).
760 HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
/* Core CPU-mode update loop: buffers partial blocks in req_ctx, resumes
 * saved hw state (or initialises the hw on first call), pushes full
 * 512-bit blocks via hash_processblock(), and saves hw state on exit. */
763 static int hash_process_data(struct hash_device_data *device_data,
764 struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
765 int msg_length, u8 *data_buffer, u8 *buffer,
/* Less than a full block in total: just accumulate into the buffer. */
772 if ((*index + msg_length) < HASH_BLOCK_SIZE) {
773 for (count = 0; count < msg_length; count++) {
774 buffer[*index + count] =
775 *(data_buffer + count);
777 *index += msg_length;
780 if (req_ctx->updated) {
781 ret = hash_resume_state(device_data,
782 &device_data->state);
783 memmove(req_ctx->state.buffer,
784 device_data->state.buffer,
787 dev_err(device_data->dev,
788 "%s: hash_resume_state() failed!\n",
793 ret = init_hash_hw(device_data, ctx);
795 dev_err(device_data->dev,
796 "%s: init_hash_hw() failed!\n",
800 req_ctx->updated = 1;
803 * If 'data_buffer' is four byte aligned and
804 * local buffer does not have any data, we can
805 * write data directly from 'data_buffer' to
806 * HW peripheral, otherwise we first copy data
809 if ((0 == (((u32)data_buffer) % 4)) &&
811 hash_processblock(device_data,
812 (const u32 *)data_buffer,
816 count < (u32)(HASH_BLOCK_SIZE - *index);
818 buffer[*index + count] =
819 *(data_buffer + count);
821 hash_processblock(device_data,
825 hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
826 data_buffer += (HASH_BLOCK_SIZE - *index);
828 msg_length -= (HASH_BLOCK_SIZE - *index);
/* Save hw state so another context can use the device between calls. */
831 ret = hash_save_state(device_data,
832 &device_data->state);
834 memmove(device_data->state.buffer,
835 req_ctx->state.buffer,
838 dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
843 } while (msg_length != 0);
850 * hash_dma_final - The hash dma final function for SHA1/SHA256.
851 * @req: The hash request for the job.
853 static int hash_dma_final(struct ahash_request *req)
856 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
857 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
858 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
859 struct hash_device_data *device_data;
860 u8 digest[SHA256_DIGEST_SIZE];
861 int bytes_written = 0;
863 ret = hash_get_device_data(ctx, &device_data);
867 dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);
869 if (req_ctx->updated) {
870 ret = hash_resume_state(device_data, &device_data->state);
873 dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
/* First (and only) pass: configure hw, select DMA input, load key. */
879 if (!req_ctx->updated) {
880 ret = hash_setconfiguration(device_data, &ctx->config);
882 dev_err(device_data->dev,
883 "%s: hash_setconfiguration() failed!\n",
888 /* Enable DMA input */
889 if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
890 HASH_CLEAR_BITS(&device_data->base->cr,
893 HASH_SET_BITS(&device_data->base->cr,
895 HASH_SET_BITS(&device_data->base->cr,
901 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
902 hash_hw_write_key(device_data, ctx->key, ctx->keylen);
904 /* Number of bits in last word = (nbytes * 8) % 32 */
905 HASH_SET_NBLW((req->nbytes * 8) % 32);
906 req_ctx->updated = 1;
909 /* Store the nents in the dma struct. */
910 ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
911 if (!ctx->device->dma.nents) {
912 dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n",
914 ret = ctx->device->dma.nents;
918 bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
919 if (bytes_written != req->nbytes) {
920 dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
/* Block until hash_dma_callback() signals transfer completion. */
926 wait_for_completion(&ctx->device->dma.complete);
929 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
/* HMAC outer phase: write the key again after the message. */
932 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
933 unsigned int keylen = ctx->keylen;
936 dev_dbg(device_data->dev, "%s: keylen: %d\n",
937 __func__, ctx->keylen);
938 hash_hw_write_key(device_data, key, keylen);
941 hash_get_digest(device_data, digest, ctx->config.algorithm);
942 memcpy(req->result, digest, ctx->digestsize);
945 release_hash_device(device_data);
948 * Allocated in setkey, and only used in HMAC.
956 * hash_hw_final - The final hash calculation function
957 * @req: The hash request for the job.
959 static int hash_hw_final(struct ahash_request *req)
962 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
963 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
964 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
965 struct hash_device_data *device_data;
966 u8 digest[SHA256_DIGEST_SIZE];
968 ret = hash_get_device_data(ctx, &device_data);
972 dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);
974 if (req_ctx->updated) {
975 ret = hash_resume_state(device_data, &device_data->state);
978 dev_err(device_data->dev,
979 "%s: hash_resume_state() failed!\n", __func__);
/* Empty message, no key: serve a canned digest (hw bug workaround). */
982 } else if (req->nbytes == 0 && ctx->keylen == 0) {
983 u8 zero_hash[SHA256_DIGEST_SIZE];
984 u32 zero_hash_size = 0;
985 bool zero_digest = false;
987 * Use a pre-calculated empty message digest
988 * (workaround since hw return zeroes, hw bug!?)
990 ret = get_empty_message_digest(device_data, &zero_hash[0],
991 &zero_hash_size, &zero_digest);
992 if (!ret && likely(zero_hash_size == ctx->digestsize) &&
994 memcpy(req->result, &zero_hash[0], ctx->digestsize);
996 } else if (!ret && !zero_digest) {
997 dev_dbg(device_data->dev,
998 "%s: HMAC zero msg with key, continue...\n",
1001 dev_err(device_data->dev,
1002 "%s: ret=%d, or wrong digest size? %s\n",
1004 zero_hash_size == ctx->digestsize ?
1009 } else if (req->nbytes == 0 && ctx->keylen > 0) {
1011 dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
1016 if (!req_ctx->updated) {
1017 ret = init_hash_hw(device_data, ctx);
1019 dev_err(device_data->dev,
1020 "%s: init_hash_hw() failed!\n", __func__);
/* Pad and flush any buffered tail bytes before reading the digest. */
1025 if (req_ctx->state.index) {
1026 hash_messagepad(device_data, req_ctx->state.buffer,
1027 req_ctx->state.index);
1030 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
1034 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
1035 unsigned int keylen = ctx->keylen;
1038 dev_dbg(device_data->dev, "%s: keylen: %d\n",
1039 __func__, ctx->keylen);
1040 hash_hw_write_key(device_data, key, keylen);
1043 hash_get_digest(device_data, digest, ctx->config.algorithm);
1044 memcpy(req->result, digest, ctx->digestsize);
1047 release_hash_device(device_data);
1050 * Allocated in setkey, and only used in HMAC.
1058 * hash_hw_update - Updates current HASH computation hashing another part of
1060 * @req: Byte array containing the message to be hashed (caller
1063 int hash_hw_update(struct ahash_request *req)
1068 struct hash_device_data *device_data;
1070 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1071 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1072 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1073 struct crypto_hash_walk walk;
1074 int msg_length = crypto_hash_walk_first(req, &walk);
1076 /* Empty message ("") is correct indata */
1077 if (msg_length == 0)
1080 index = req_ctx->state.index;
1081 buffer = (u8 *)req_ctx->state.buffer;
1083 /* Check if ctx->state.length + msg_length
1085 if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
1086 HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
1087 pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
1091 ret = hash_get_device_data(ctx, &device_data);
/* Walk the request sg list, processing each chunk via
 * hash_process_data() until the walk is exhausted. */
1096 while (0 != msg_length) {
1097 data_buffer = walk.data;
1098 ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
1099 data_buffer, buffer, &index);
1102 dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
1107 msg_length = crypto_hash_walk_done(&walk, 0);
1110 req_ctx->state.index = index;
1111 dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
1112 __func__, req_ctx->state.index, req_ctx->state.bit_index);
1115 release_hash_device(device_data);
1121 * hash_resume_state - Function that resumes the state of an calculation.
1122 * @device_data: Pointer to the device structure.
1123 * @device_state: The state to be restored in the hash hardware
1125 int hash_resume_state(struct hash_device_data *device_data,
1126 const struct hash_state *device_state)
1130 int hash_mode = HASH_OPER_MODE_HASH;
1132 if (NULL == device_state) {
1133 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1138 /* Check correctness of index and length members */
1139 if (device_state->index > HASH_BLOCK_SIZE ||
1140 (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
1141 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1147 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
1148 * prepare the initialize the HASH accelerator to compute the message
1149 * digest of a new message.
1153 temp_cr = device_state->temp_cr;
1154 writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
1156 if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
1157 hash_mode = HASH_OPER_MODE_HMAC;
1159 hash_mode = HASH_OPER_MODE_HASH;
/* Restore the context switch registers; registers >= 36 are only
 * present in HMAC mode (mirrors hash_save_state()). */
1161 for (count = 0; count < HASH_CSR_COUNT; count++) {
1162 if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
1165 writel_relaxed(device_state->csr[count],
1166 &device_data->base->csrx[count]);
1169 writel_relaxed(device_state->csfull, &device_data->base->csfull);
1170 writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
1172 writel_relaxed(device_state->str_reg, &device_data->base->str);
1173 writel_relaxed(temp_cr, &device_data->base->cr);
1179 * hash_save_state - Function that saves the state of hardware.
1180 * @device_data: Pointer to the device structure.
1181 * @device_state: The strucure where the hardware state should be saved.
1183 int hash_save_state(struct hash_device_data *device_data,
1184 struct hash_state *device_state)
1188 int hash_mode = HASH_OPER_MODE_HASH;
1190 if (NULL == device_state) {
1191 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1196 /* Write dummy value to force digest intermediate calculation. This
1197 * actually makes sure that there isn't any ongoing calculation in the
1200 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
1203 temp_cr = readl_relaxed(&device_data->base->cr);
1205 device_state->str_reg = readl_relaxed(&device_data->base->str);
1207 device_state->din_reg = readl_relaxed(&device_data->base->din);
1209 if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
1210 hash_mode = HASH_OPER_MODE_HMAC;
1212 hash_mode = HASH_OPER_MODE_HASH;
/* Save the context switch registers; registers >= 36 exist only in
 * HMAC mode (mirrors hash_resume_state()). */
1214 for (count = 0; count < HASH_CSR_COUNT; count++) {
1215 if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
1218 device_state->csr[count] =
1219 readl_relaxed(&device_data->base->csrx[count]);
1222 device_state->csfull = readl_relaxed(&device_data->base->csfull);
1223 device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
1225 device_state->temp_cr = temp_cr;
1231 * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
/* Returns an error (path visible below) when the AMBA peripheral/cell
 * ID registers do not match the expected HASH block. */
1235 int hash_check_hw(struct hash_device_data *device_data)
1237 /* Checking Peripheral Ids */
1238 if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
1239 HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
1240 HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
1241 HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
1242 HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
1243 HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
1244 HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
1245 HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
1249 dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
1254 * hash_get_digest - Gets the digest.
1255 * @device_data: Pointer to the device structure.
1256 * @digest: User allocated byte array for the calculated digest.
1257 * @algorithm: The algorithm in use.
1259 void hash_get_digest(struct hash_device_data *device_data,
1260 u8 *digest, int algorithm)
1262 u32 temp_hx_val, count;
1265 if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
1266 dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
1267 __func__, algorithm);
1271 if (algorithm == HASH_ALGO_SHA1)
1272 loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
1274 loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
1276 dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n",
1277 __func__, (u32) digest);
1279 /* Copy result into digest array */
/* HX registers are read as 32-bit words and stored big-endian
 * (most significant byte first) into the byte array. */
1280 for (count = 0; count < loop_ctr; count++) {
1281 temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
1282 digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
1283 digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
1284 digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
1285 digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
1290 * hash_update - The hash update function for SHA1/SHA2 (SHA256).
1291 * @req: The hash request for the job.
1293 static int ahash_update(struct ahash_request *req)
1296 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1298 if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
1299 ret = hash_hw_update(req);
1300 /* Skip update for DMA, all data will be passed to DMA in final */
1303 pr_err("%s: hash_hw_update() failed!\n", __func__);
1310 * hash_final - The hash final function for SHA1/SHA2 (SHA256).
1311 * @req: The hash request for the job.
1313 static int ahash_final(struct ahash_request *req)
1316 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1318 pr_debug("%s: data size: %d\n", __func__, req->nbytes);
/* Dispatch to the DMA path only when init selected it for this req. */
1320 if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
1321 ret = hash_dma_final(req);
1323 ret = hash_hw_final(req);
1326 pr_err("%s: hash_hw/dma_final() failed\n", __func__);
/*
 * hash_setkey - common HMAC key setter for SHA-1/SHA-256.
 * Duplicates @key into the tfm context; @alg is used only for the error
 * message. The copy is released in the final step.
 * NOTE(review): the previous ctx->key is not freed here — if setkey can
 * be called twice before final frees it, the old copy leaks; confirm
 * against the free in the final path before changing this.
 */
1332 static int hash_setkey(struct crypto_ahash *tfm,
1333 const u8 *key, unsigned int keylen, int alg)
1336 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
/* Freed in the final step. */
1341 ctx->key = kmemdup(key, keylen, GFP_KERNEL);
1343 pr_err("%s: Failed to allocate ctx->key for %d\n",
1347 ctx->keylen = keylen;
1352 static int ahash_sha1_init(struct ahash_request *req)
1354 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1355 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1357 ctx->config.data_format = HASH_DATA_8_BITS;
1358 ctx->config.algorithm = HASH_ALGO_SHA1;
1359 ctx->config.oper_mode = HASH_OPER_MODE_HASH;
1360 ctx->digestsize = SHA1_DIGEST_SIZE;
1362 return hash_init(req);
1365 static int ahash_sha256_init(struct ahash_request *req)
1367 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1368 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1370 ctx->config.data_format = HASH_DATA_8_BITS;
1371 ctx->config.algorithm = HASH_ALGO_SHA256;
1372 ctx->config.oper_mode = HASH_OPER_MODE_HASH;
1373 ctx->digestsize = SHA256_DIGEST_SIZE;
1375 return hash_init(req);
/* One-shot SHA-1 digest: init + update + final on the same request. */
1378 static int ahash_sha1_digest(struct ahash_request *req)
1382 ret1 = ahash_sha1_init(req);
1386 ret1 = ahash_update(req);
1387 ret2 = ahash_final(req);
/* First error (init/update) wins, otherwise report final's status. */
1390 return ret1 ? ret1 : ret2;
/* One-shot SHA-256 digest: init + update + final on the same request. */
1393 static int ahash_sha256_digest(struct ahash_request *req)
1397 ret1 = ahash_sha256_init(req);
1401 ret1 = ahash_update(req);
1402 ret2 = ahash_final(req);
/* First error (init/update) wins, otherwise report final's status. */
1405 return ret1 ? ret1 : ret2;
/* Partial hash-state import is not implemented by this driver. */
1408 static int ahash_noimport(struct ahash_request *req, const void *in)
/* Partial hash-state export is not implemented by this driver. */
1413 static int ahash_noexport(struct ahash_request *req, void *out)
1418 static int hmac_sha1_init(struct ahash_request *req)
1420 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1421 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1423 ctx->config.data_format = HASH_DATA_8_BITS;
1424 ctx->config.algorithm = HASH_ALGO_SHA1;
1425 ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
1426 ctx->digestsize = SHA1_DIGEST_SIZE;
1428 return hash_init(req);
1431 static int hmac_sha256_init(struct ahash_request *req)
1433 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1434 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1436 ctx->config.data_format = HASH_DATA_8_BITS;
1437 ctx->config.algorithm = HASH_ALGO_SHA256;
1438 ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
1439 ctx->digestsize = SHA256_DIGEST_SIZE;
1441 return hash_init(req);
/* One-shot HMAC-SHA-1 digest: init + update + final on the same request. */
1444 static int hmac_sha1_digest(struct ahash_request *req)
1448 ret1 = hmac_sha1_init(req);
1452 ret1 = ahash_update(req);
1453 ret2 = ahash_final(req);
/* First error (init/update) wins, otherwise report final's status. */
1456 return ret1 ? ret1 : ret2;
/* One-shot HMAC-SHA-256 digest: init + update + final on the same request. */
1459 static int hmac_sha256_digest(struct ahash_request *req)
1463 ret1 = hmac_sha256_init(req);
1467 ret1 = ahash_update(req);
1468 ret2 = ahash_final(req);
/* First error (init/update) wins, otherwise report final's status. */
1471 return ret1 ? ret1 : ret2;
/* crypto API setkey callback: delegate to the common setter for SHA-1. */
1474 static int hmac_sha1_setkey(struct crypto_ahash *tfm,
1475 const u8 *key, unsigned int keylen)
1477 return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
/* crypto API setkey callback: delegate to the common setter for SHA-256. */
1480 static int hmac_sha256_setkey(struct crypto_ahash *tfm,
1481 const u8 *key, unsigned int keylen)
1483 return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
/*
 * struct hash_algo_template - pairs a hardware configuration with the
 * ahash algorithm definition registered for it.
 * @conf: hardware algorithm/operation mode, copied in hash_cra_init().
 * @hash: the crypto API ahash algorithm descriptor.
 */
1486 struct hash_algo_template {
1487 struct hash_config conf;
1488 struct ahash_alg hash;
/*
 * hash_cra_init - tfm init callback: seed the tfm context from the
 * hash_algo_template entry wrapping this algorithm, and set the size of
 * the per-request context.
 */
1491 static int hash_cra_init(struct crypto_tfm *tfm)
1493 struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
1494 struct crypto_alg *alg = tfm->__crt_alg;
1495 struct hash_algo_template *hash_alg;
/* Recover the template that embeds this ahash algorithm. */
1497 hash_alg = container_of(__crypto_ahash_alg(alg),
1498 struct hash_algo_template,
1501 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1502 sizeof(struct hash_req_ctx));
/* Copy the template's hardware configuration into the tfm context. */
1504 ctx->config.data_format = HASH_DATA_8_BITS;
1505 ctx->config.algorithm = hash_alg->conf.algorithm;
1506 ctx->config.oper_mode = hash_alg->conf.oper_mode;
1508 ctx->digestsize = hash_alg->hash.halg.digestsize;
/*
 * Algorithms registered with the crypto API: plain and HMAC variants of
 * SHA-1 and SHA-256, all asynchronous and backed by this hardware.
 */
1513 static struct hash_algo_template hash_algs[] = {
/* Plain SHA-1 */
1515 .conf.algorithm = HASH_ALGO_SHA1,
1516 .conf.oper_mode = HASH_OPER_MODE_HASH,
1519 .update = ahash_update,
1520 .final = ahash_final,
1521 .digest = ahash_sha1_digest,
1522 .export = ahash_noexport,
1523 .import = ahash_noimport,
1524 .halg.digestsize = SHA1_DIGEST_SIZE,
1525 .halg.statesize = sizeof(struct hash_ctx),
1528 .cra_driver_name = "sha1-ux500",
1529 .cra_flags = CRYPTO_ALG_ASYNC,
1530 .cra_blocksize = SHA1_BLOCK_SIZE,
1531 .cra_ctxsize = sizeof(struct hash_ctx),
1532 .cra_init = hash_cra_init,
1533 .cra_module = THIS_MODULE,
/* Plain SHA-256 */
1538 .conf.algorithm = HASH_ALGO_SHA256,
1539 .conf.oper_mode = HASH_OPER_MODE_HASH,
1542 .update = ahash_update,
1543 .final = ahash_final,
1544 .digest = ahash_sha256_digest,
1545 .export = ahash_noexport,
1546 .import = ahash_noimport,
1547 .halg.digestsize = SHA256_DIGEST_SIZE,
1548 .halg.statesize = sizeof(struct hash_ctx),
1550 .cra_name = "sha256",
1551 .cra_driver_name = "sha256-ux500",
1552 .cra_flags = CRYPTO_ALG_ASYNC,
1553 .cra_blocksize = SHA256_BLOCK_SIZE,
1554 .cra_ctxsize = sizeof(struct hash_ctx),
1555 .cra_init = hash_cra_init,
1556 .cra_module = THIS_MODULE,
/* HMAC-SHA-1 (hardware HMAC mode, key installed via setkey) */
1561 .conf.algorithm = HASH_ALGO_SHA1,
1562 .conf.oper_mode = HASH_OPER_MODE_HMAC,
1565 .update = ahash_update,
1566 .final = ahash_final,
1567 .digest = hmac_sha1_digest,
1568 .setkey = hmac_sha1_setkey,
1569 .export = ahash_noexport,
1570 .import = ahash_noimport,
1571 .halg.digestsize = SHA1_DIGEST_SIZE,
1572 .halg.statesize = sizeof(struct hash_ctx),
1574 .cra_name = "hmac(sha1)",
1575 .cra_driver_name = "hmac-sha1-ux500",
1576 .cra_flags = CRYPTO_ALG_ASYNC,
1577 .cra_blocksize = SHA1_BLOCK_SIZE,
1578 .cra_ctxsize = sizeof(struct hash_ctx),
1579 .cra_init = hash_cra_init,
1580 .cra_module = THIS_MODULE,
/* HMAC-SHA-256 (hardware HMAC mode, key installed via setkey) */
1585 .conf.algorithm = HASH_ALGO_SHA256,
1586 .conf.oper_mode = HASH_OPER_MODE_HMAC,
1589 .update = ahash_update,
1590 .final = ahash_final,
1591 .digest = hmac_sha256_digest,
1592 .setkey = hmac_sha256_setkey,
1593 .export = ahash_noexport,
1594 .import = ahash_noimport,
1595 .halg.digestsize = SHA256_DIGEST_SIZE,
1596 .halg.statesize = sizeof(struct hash_ctx),
1598 .cra_name = "hmac(sha256)",
1599 .cra_driver_name = "hmac-sha256-ux500",
1600 .cra_flags = CRYPTO_ALG_ASYNC,
1601 .cra_blocksize = SHA256_BLOCK_SIZE,
1602 .cra_ctxsize = sizeof(struct hash_ctx),
1603 .cra_init = hash_cra_init,
1604 .cra_module = THIS_MODULE,
1611 * ahash_algs_register_all - Register all supported hash algorithms.
/*
 * Register every entry of hash_algs[] with the crypto API; on failure,
 * unregister the entries that already succeeded and return the error.
 */
1613 static int ahash_algs_register_all(struct hash_device_data *device_data)
1619 for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
1620 ret = crypto_register_ahash(&hash_algs[i].hash);
1623 dev_err(device_data->dev, "%s: alg registration failed\n",
1624 hash_algs[i].hash.halg.base.cra_driver_name);
/* Roll back the registrations that already succeeded. */
1630 for (i = 0; i < count; i++)
1631 crypto_unregister_ahash(&hash_algs[i].hash);
1636 * ahash_algs_unregister_all - Unregister all registered hash algorithms.
1638 static void ahash_algs_unregister_all(struct hash_device_data *device_data)
1642 for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
1643 crypto_unregister_ahash(&hash_algs[i].hash);
1647 * ux500_hash_probe - Function that probes the hash hardware.
1648 * @pdev: The platform device.
/*
 * ux500_hash_probe - set up one HASH block: map registers, acquire
 * regulator and clock, power up, verify the hardware IDs, optionally set
 * up DMA, publish the device, and register the algorithms.
 */
1650 static int ux500_hash_probe(struct platform_device *pdev)
1653 struct resource *res = NULL;
1654 struct hash_device_data *device_data;
1655 struct device *dev = &pdev->dev;
/* NOTE(review): GFP_ATOMIC in probe context — GFP_KERNEL would normally
 * suffice here; presumably historical, confirm before changing. */
1657 device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
1663 device_data->dev = dev;
1664 device_data->current_ctx = NULL;
1666 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1668 dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
1673 device_data->phybase = res->start;
1674 device_data->base = devm_ioremap_resource(dev, res);
1675 if (IS_ERR(device_data->base)) {
1676 dev_err(dev, "%s: ioremap() failed!\n", __func__);
1677 ret = PTR_ERR(device_data->base);
1680 spin_lock_init(&device_data->ctx_lock);
1681 spin_lock_init(&device_data->power_state_lock);
1683 /* Enable power for HASH1 hardware block */
1684 device_data->regulator = regulator_get(dev, "v-ape");
1685 if (IS_ERR(device_data->regulator)) {
1686 dev_err(dev, "%s: regulator_get() failed!\n", __func__);
1687 ret = PTR_ERR(device_data->regulator);
/* Clear the pointer so error paths do not regulator_put() an ERR_PTR. */
1688 device_data->regulator = NULL;
1692 /* Enable the clock for HASH1 hardware block */
1693 device_data->clk = devm_clk_get(dev, NULL);
1694 if (IS_ERR(device_data->clk)) {
1695 dev_err(dev, "%s: clk_get() failed!\n", __func__);
1696 ret = PTR_ERR(device_data->clk);
1700 ret = clk_prepare(device_data->clk);
1702 dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
1706 /* Enable device power (and clock) */
1707 ret = hash_enable_power(device_data, false);
1709 dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
1710 goto out_clk_unprepare;
/* Verify the peripheral/cell ID registers before going further. */
1713 ret = hash_check_hw(device_data);
1715 dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
1719 if (hash_mode == HASH_MODE_DMA)
1720 hash_dma_setup_channel(device_data, dev);
1722 platform_set_drvdata(pdev, device_data);
1724 /* Put the new device into the device list... */
1725 klist_add_tail(&device_data->list_node, &driver_data.device_list);
1726 /* ... and signal that a new device is available. */
1727 up(&driver_data.device_allocation);
1729 ret = ahash_algs_register_all(device_data);
1731 dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
1736 dev_info(dev, "successfully registered\n");
/* Error unwind: power, clock, regulator — in reverse acquisition order. */
1740 hash_disable_power(device_data, false);
1743 clk_unprepare(device_data->clk);
1746 regulator_put(device_data->regulator);
1753 * ux500_hash_remove - Function that removes the hash device from the platform.
1754 * @pdev: The platform device.
/*
 * ux500_hash_remove - reverse of probe: claim the device from the pool
 * (bailing out if it is busy), delist it, unregister the algorithms when
 * it was the last device, and release power/clock/regulator.
 */
1756 static int ux500_hash_remove(struct platform_device *pdev)
1758 struct hash_device_data *device_data;
1759 struct device *dev = &pdev->dev;
1761 device_data = platform_get_drvdata(pdev);
1763 dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
1767 /* Try to decrease the number of available devices. */
1768 if (down_trylock(&driver_data.device_allocation))
1771 /* Check that the device is free */
1772 spin_lock(&device_data->ctx_lock);
1773 /* current_ctx allocates a device, NULL = unallocated */
1774 if (device_data->current_ctx) {
1775 /* The device is busy */
1776 spin_unlock(&device_data->ctx_lock);
1777 /* Return the device to the pool. */
1778 up(&driver_data.device_allocation);
1782 spin_unlock(&device_data->ctx_lock);
1784 /* Remove the device from the list */
1785 if (klist_node_attached(&device_data->list_node))
1786 klist_remove(&device_data->list_node);
1788 /* If this was the last device, remove the services */
1789 if (list_empty(&driver_data.device_list.k_list))
1790 ahash_algs_unregister_all(device_data);
1792 if (hash_disable_power(device_data, false))
1793 dev_err(dev, "%s: hash_disable_power() failed\n",
/* devm_* handles the iomap/allocation; only clk/regulator need manual release. */
1796 clk_unprepare(device_data->clk);
1797 regulator_put(device_data->regulator);
1803 * ux500_hash_shutdown - Function that shuts down the hash device.
1804 * @pdev: The platform device
/*
 * ux500_hash_shutdown - best-effort teardown at system shutdown: claim
 * the device if free (proceeding anyway if busy), delist it, unregister
 * the algorithms when it was the last device, and power it down.
 */
1806 static void ux500_hash_shutdown(struct platform_device *pdev)
1808 struct hash_device_data *device_data;
1810 device_data = platform_get_drvdata(pdev);
1812 dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
1817 /* Check that the device is free */
1818 spin_lock(&device_data->ctx_lock);
1819 /* current_ctx allocates a device, NULL = unallocated */
1820 if (!device_data->current_ctx) {
1821 if (down_trylock(&driver_data.device_allocation))
1822 dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n",
1825 * (Allocate the device)
1826 * Need to set this to non-null (dummy) value,
1827 * to avoid usage if context switching.
/* Bumps the NULL pointer to a dummy non-NULL "claimed" marker. */
1829 device_data->current_ctx++;
1831 spin_unlock(&device_data->ctx_lock);
1833 /* Remove the device from the list */
1834 if (klist_node_attached(&device_data->list_node))
1835 klist_remove(&device_data->list_node);
1837 /* If this was the last device, remove the services */
1838 if (list_empty(&driver_data.device_list.k_list))
1839 ahash_algs_unregister_all(device_data);
1841 if (hash_disable_power(device_data, false))
1842 dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
1846 #ifdef CONFIG_PM_SLEEP
1848 * ux500_hash_suspend - Function that suspends the hash device.
1849 * @dev: Device to suspend.
/*
 * ux500_hash_suspend - claim the device if free, take it out of the
 * allocation pool, and power it down (fully if idle, "save state" mode
 * if it was busy).
 */
1851 static int ux500_hash_suspend(struct device *dev)
1854 struct hash_device_data *device_data;
/* Dummy pointer used only to produce the non-NULL "claimed" marker. */
1855 struct hash_ctx *temp_ctx = NULL;
1857 device_data = dev_get_drvdata(dev);
1859 dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
1863 spin_lock(&device_data->ctx_lock);
1864 if (!device_data->current_ctx)
/* Free device: bump NULL to the dummy marker to claim it. */
1865 device_data->current_ctx++;
1866 spin_unlock(&device_data->ctx_lock);
/* ++temp_ctx == the same dummy marker, i.e. "we claimed it above".
 * NOTE(review): incrementing a NULL pointer is technically UB in C;
 * it works on this platform but is worth confirming/cleaning up. */
1868 if (device_data->current_ctx == ++temp_ctx) {
1869 if (down_interruptible(&driver_data.device_allocation))
1870 dev_dbg(dev, "%s: down_interruptible() failed\n",
1872 ret = hash_disable_power(device_data, false);
/* Device was busy: power down while preserving hardware state. */
1875 ret = hash_disable_power(device_data, true);
1879 dev_err(dev, "%s: hash_disable_power()\n", __func__);
1885 * ux500_hash_resume - Function that resumes the hash device.
1886 * @dev: Device to resume.
/*
 * ux500_hash_resume - undo suspend: release the dummy claim if suspend
 * took it, return the device to the pool, and power it back up
 * (restoring saved state when it was suspended busy).
 */
1888 static int ux500_hash_resume(struct device *dev)
1891 struct hash_device_data *device_data;
/* Dummy pointer: ++temp_ctx reproduces suspend's non-NULL marker. */
1892 struct hash_ctx *temp_ctx = NULL;
1894 device_data = dev_get_drvdata(dev);
1896 dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
1900 spin_lock(&device_data->ctx_lock);
/* If suspend claimed the device with the dummy marker, release it. */
1901 if (device_data->current_ctx == ++temp_ctx)
1902 device_data->current_ctx = NULL;
1903 spin_unlock(&device_data->ctx_lock);
1905 if (!device_data->current_ctx)
1906 up(&driver_data.device_allocation);
1908 ret = hash_enable_power(device_data, true);
1911 dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
1917 static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
/* Device-tree match table; also exported for module autoloading. */
1919 static const struct of_device_id ux500_hash_match[] = {
1920 { .compatible = "stericsson,ux500-hash" },
1923 MODULE_DEVICE_TABLE(of, ux500_hash_match);
/* Platform driver glue: probe/remove/shutdown plus PM ops and DT match. */
1925 static struct platform_driver hash_driver = {
1926 .probe = ux500_hash_probe,
1927 .remove = ux500_hash_remove,
1928 .shutdown = ux500_hash_shutdown,
1931 .of_match_table = ux500_hash_match,
1932 .pm = &ux500_hash_pm,
1937 * ux500_hash_mod_init - The kernel module init function.
1939 static int __init ux500_hash_mod_init(void)
1941 klist_init(&driver_data.device_list, NULL, NULL);
1942 /* Initialize the semaphore to 0 devices (locked state) */
1943 sema_init(&driver_data.device_allocation, 0);
1945 return platform_driver_register(&hash_driver);
1949 * ux500_hash_mod_fini - The kernel module exit function.
/* Module exit: unregister the platform driver (per-device teardown in remove). */
1951 static void __exit ux500_hash_mod_fini(void)
1953 platform_driver_unregister(&hash_driver);
1956 module_init(ux500_hash_mod_init);
1957 module_exit(ux500_hash_mod_fini);
1959 MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
1960 MODULE_LICENSE("GPL");
1962 MODULE_ALIAS_CRYPTO("sha1-all");
1963 MODULE_ALIAS_CRYPTO("sha256-all");
1964 MODULE_ALIAS_CRYPTO("hmac-sha1-all");
1965 MODULE_ALIAS_CRYPTO("hmac-sha256-all");