Lines Matching +full:clock +full:- +full:skip
1 // SPDX-License-Identifier: GPL-2.0
10 // Hash part based on omap-sham.c driver.
14 #include <linux/dma-mapping.h>
145 #define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
149 #define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
211 * HASH bit numbers, used by device, setting in dev->hash_flags with
230 * struct samsung_aes_variant - platform specific SSS driver data
258 * struct s5p_aes_dev - Crypto device state container
260 * @clk: Clock for accessing hardware
261 * @pclk: APB bus clock necessary to access the hardware
263 * @aes_ioaddr: Per-variant offset for AES block IO memory
268 * in device. This is DMA-mapped into device.
270 * in device. This is DMA-mapped into device.
284 * @io_hash_base: Per-variant offset for HASH block IO memory.
335 * struct s5p_hash_reqctx - HASH request context
344 * @sgl: sg for joining buffer and req->src scatterlist
345 * @skip: Skip offset in req->src for current op
350 * @buffer: For byte(s) from end of req->src in UPDATE op
365 unsigned int skip; member
375 * struct s5p_hash_ctx - HASH transformation context
406 .compatible = "samsung,s5pv210-secss",
410 .compatible = "samsung,exynos4210-secss",
414 .compatible = "samsung,exynos5433-slim-sss",
424 if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) in find_s5p_sss_version()
425 return of_device_get_match_data(&pdev->dev); in find_s5p_sss_version()
428 platform_get_device_id(pdev)->driver_data; in find_s5p_sss_version()
454 len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE); in s5p_free_sg_cpy()
476 struct skcipher_request *req = dev->req; in s5p_sg_done()
479 if (dev->sg_dst_cpy) { in s5p_sg_done()
480 dev_dbg(dev->dev, in s5p_sg_done()
482 dev->req->cryptlen); in s5p_sg_done()
483 s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst, in s5p_sg_done()
484 dev->req->cryptlen, 1); in s5p_sg_done()
486 s5p_free_sg_cpy(dev, &dev->sg_src_cpy); in s5p_sg_done()
487 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy); in s5p_sg_done()
488 if (reqctx->mode & FLAGS_AES_CBC) in s5p_sg_done()
489 memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE); in s5p_sg_done()
491 else if (reqctx->mode & FLAGS_AES_CTR) in s5p_sg_done()
492 memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE); in s5p_sg_done()
495 /* Calls the completion. Cannot be called with dev->lock held. */
503 dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE); in s5p_unset_outdata()
508 dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE); in s5p_unset_indata()
519 return -ENOMEM; in s5p_make_sg_cpy()
521 len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE); in s5p_make_sg_cpy()
526 return -ENOMEM; in s5p_make_sg_cpy()
529 s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0); in s5p_make_sg_cpy()
539 if (!sg->length) in s5p_set_outdata()
540 return -EINVAL; in s5p_set_outdata()
542 if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE)) in s5p_set_outdata()
543 return -ENOMEM; in s5p_set_outdata()
545 dev->sg_dst = sg; in s5p_set_outdata()
552 if (!sg->length) in s5p_set_indata()
553 return -EINVAL; in s5p_set_indata()
555 if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE)) in s5p_set_indata()
556 return -ENOMEM; in s5p_set_indata()
558 dev->sg_src = sg; in s5p_set_indata()
564 * Returns -ERRNO on error (mapping of new data failed).
566 * - 0 if there is no more data,
567 * - 1 if new transmitting (output) data is ready and its address+length
576 if (!sg_is_last(dev->sg_dst)) { in s5p_aes_tx()
577 ret = s5p_set_outdata(dev, sg_next(dev->sg_dst)); in s5p_aes_tx()
586 * Returns -ERRNO on error (mapping of new data failed).
588 * - 0 if there is no more data,
589 * - 1 if new receiving (input) data is ready and its address+length
598 if (!sg_is_last(dev->sg_src)) { in s5p_aes_rx()
599 ret = s5p_set_indata(dev, sg_next(dev->sg_src)); in s5p_aes_rx()
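The 0/1/-errno convention documented above is what the FIFO interrupt path keys off. A minimal sketch of how a consumer reacts to each return value (a simplified, hypothetical view of the handling done in s5p_aes_interrupt(); the error label is a placeholder):

	ret = s5p_aes_rx(dev);
	if (ret < 0)
		goto error;				/* mapping the next src sg failed */
	else if (ret == 1)
		s5p_set_dma_indata(dev, dev->sg_src);	/* next chunk mapped, re-arm DMA */
	/* ret == 0: all input consumed, nothing left to feed */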
609 return __raw_readl(dd->io_hash_base + offset); in s5p_hash_read()
615 __raw_writel(value, dd->io_hash_base + offset); in s5p_hash_write()
619 * s5p_set_dma_hashdata() - start DMA with sg
626 dev->hash_sg_cnt--; in s5p_set_dma_hashdata()
632 * s5p_hash_rx() - get next hash_sg_iter
642 if (dev->hash_sg_cnt > 0) { in s5p_hash_rx()
643 dev->hash_sg_iter = sg_next(dev->hash_sg_iter); in s5p_hash_rx()
647 set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags); in s5p_hash_rx()
648 if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags)) in s5p_hash_rx()
668 spin_lock_irqsave(&dev->lock, flags); in s5p_aes_interrupt()
685 if (sg_is_last(dev->sg_dst)) in s5p_aes_interrupt()
707 set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags); in s5p_aes_interrupt()
726 s5p_set_dma_hashdata(dev, dev->hash_sg_iter); in s5p_aes_interrupt()
728 spin_unlock_irqrestore(&dev->lock, flags); in s5p_aes_interrupt()
730 s5p_aes_complete(dev->req, 0); in s5p_aes_interrupt()
732 tasklet_schedule(&dev->tasklet); in s5p_aes_interrupt()
741 s5p_set_dma_outdata(dev, dev->sg_dst); in s5p_aes_interrupt()
743 s5p_set_dma_indata(dev, dev->sg_src); in s5p_aes_interrupt()
745 s5p_set_dma_hashdata(dev, dev->hash_sg_iter); in s5p_aes_interrupt()
747 spin_unlock_irqrestore(&dev->lock, flags); in s5p_aes_interrupt()
754 dev->busy = false; in s5p_aes_interrupt()
755 req = dev->req; in s5p_aes_interrupt()
757 s5p_set_dma_hashdata(dev, dev->hash_sg_iter); in s5p_aes_interrupt()
759 spin_unlock_irqrestore(&dev->lock, flags); in s5p_aes_interrupt()
769 tasklet_schedule(&dev->hash_tasklet); in s5p_aes_interrupt()
778 * s5p_hash_read_msg() - read message or IV from HW
784 struct s5p_aes_dev *dd = ctx->dd; in s5p_hash_read_msg()
785 u32 *hash = (u32 *)ctx->digest; in s5p_hash_read_msg()
788 for (i = 0; i < ctx->nregs; i++) in s5p_hash_read_msg()
793 * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
800 const u32 *hash = (const u32 *)ctx->digest; in s5p_hash_write_ctx_iv()
803 for (i = 0; i < ctx->nregs; i++) in s5p_hash_write_ctx_iv()
808 * s5p_hash_write_iv() - write IV for next partial/finup op.
815 s5p_hash_write_ctx_iv(ctx->dd, ctx); in s5p_hash_write_iv()
819 * s5p_hash_copy_result() - copy digest into req->result
826 if (!req->result) in s5p_hash_copy_result()
829 memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF); in s5p_hash_copy_result()
833 * s5p_hash_dma_flush() - flush HASH DMA
842 * s5p_hash_dma_enable() - enable DMA mode for HASH
853 * s5p_hash_irq_disable() - disable HASH irq signals
863 * s5p_hash_irq_enable() - enable irq signals
873 * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
882 spin_lock_irqsave(&dev->lock, flags); in s5p_hash_set_flow()
889 spin_unlock_irqrestore(&dev->lock, flags); in s5p_hash_set_flow()
893 * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
915 * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
931 struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req); in s5p_hash_write_ctrl()
936 configflags = ctx->engine | SSS_HASH_INIT_BIT; in s5p_hash_write_ctrl()
938 if (likely(ctx->digcnt)) { in s5p_hash_write_ctrl()
948 tmplen = ctx->digcnt * 8; in s5p_hash_write_ctrl()
971 * s5p_hash_xmit_dma() - start DMA hash processing
981 struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req); in s5p_hash_xmit_dma()
984 cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE); in s5p_hash_xmit_dma()
986 dev_err(dd->dev, "dma_map_sg error\n"); in s5p_hash_xmit_dma()
987 ctx->error = true; in s5p_hash_xmit_dma()
988 return -EINVAL; in s5p_hash_xmit_dma()
991 set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags); in s5p_hash_xmit_dma()
992 dd->hash_sg_iter = ctx->sg; in s5p_hash_xmit_dma()
993 dd->hash_sg_cnt = cnt; in s5p_hash_xmit_dma()
995 ctx->digcnt += length; in s5p_hash_xmit_dma()
996 ctx->total -= length; in s5p_hash_xmit_dma()
1000 set_bit(HASH_FLAGS_FINAL, &dd->hash_flags); in s5p_hash_xmit_dma()
1002 s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */ in s5p_hash_xmit_dma()
1004 return -EINPROGRESS; in s5p_hash_xmit_dma()
1008 * s5p_hash_copy_sgs() - copy request's bytes into new buffer
1017 * Set bit in dd->hash_flags so we can free it after irq ends processing.
1025 len = new_len + ctx->bufcnt; in s5p_hash_copy_sgs()
1030 dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n"); in s5p_hash_copy_sgs()
1031 ctx->error = true; in s5p_hash_copy_sgs()
1032 return -ENOMEM; in s5p_hash_copy_sgs()
1035 if (ctx->bufcnt) in s5p_hash_copy_sgs()
1036 memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt); in s5p_hash_copy_sgs()
1038 scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip, in s5p_hash_copy_sgs()
1040 sg_init_table(ctx->sgl, 1); in s5p_hash_copy_sgs()
1041 sg_set_buf(ctx->sgl, buf, len); in s5p_hash_copy_sgs()
1042 ctx->sg = ctx->sgl; in s5p_hash_copy_sgs()
1043 ctx->sg_len = 1; in s5p_hash_copy_sgs()
1044 ctx->bufcnt = 0; in s5p_hash_copy_sgs()
1045 ctx->skip = 0; in s5p_hash_copy_sgs()
1046 set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags); in s5p_hash_copy_sgs()
1052 * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
1059 * source sg into it, adjusting begin and/or end for skip offset and
1062 * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
1068 unsigned int skip = ctx->skip, n = sg_nents(sg); in s5p_hash_copy_sg_lists() local
1072 if (ctx->bufcnt) in s5p_hash_copy_sg_lists()
1075 ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL); in s5p_hash_copy_sg_lists()
1076 if (!ctx->sg) { in s5p_hash_copy_sg_lists()
1077 ctx->error = true; in s5p_hash_copy_sg_lists()
1078 return -ENOMEM; in s5p_hash_copy_sg_lists()
1081 sg_init_table(ctx->sg, n); in s5p_hash_copy_sg_lists()
1083 tmp = ctx->sg; in s5p_hash_copy_sg_lists()
1085 ctx->sg_len = 0; in s5p_hash_copy_sg_lists()
1087 if (ctx->bufcnt) { in s5p_hash_copy_sg_lists()
1088 sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt); in s5p_hash_copy_sg_lists()
1090 ctx->sg_len++; in s5p_hash_copy_sg_lists()
1093 while (sg && skip >= sg->length) { in s5p_hash_copy_sg_lists()
1094 skip -= sg->length; in s5p_hash_copy_sg_lists()
1099 len = sg->length - skip; in s5p_hash_copy_sg_lists()
1103 new_len -= len; in s5p_hash_copy_sg_lists()
1104 sg_set_page(tmp, sg_page(sg), len, sg->offset + skip); in s5p_hash_copy_sg_lists()
1105 skip = 0; in s5p_hash_copy_sg_lists()
1110 ctx->sg_len++; in s5p_hash_copy_sg_lists()
1114 set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags); in s5p_hash_copy_sg_lists()
1120 * s5p_hash_prepare_sgs() - prepare sg for processing
1139 unsigned int skip = ctx->skip, nbytes = new_len, n = 0; in s5p_hash_prepare_sgs() local
1143 if (!sg || !sg->length || !new_len) in s5p_hash_prepare_sgs()
1146 if (skip || !final) in s5p_hash_prepare_sgs()
1151 if (skip >= sg_tmp->length) { in s5p_hash_prepare_sgs()
1152 skip -= sg_tmp->length; in s5p_hash_prepare_sgs()
1153 if (!sg_tmp->length) { in s5p_hash_prepare_sgs()
1158 if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) { in s5p_hash_prepare_sgs()
1163 if (nbytes < sg_tmp->length - skip) { in s5p_hash_prepare_sgs()
1168 nbytes -= sg_tmp->length - skip; in s5p_hash_prepare_sgs()
1169 skip = 0; in s5p_hash_prepare_sgs()
1184 if (ctx->bufcnt) { in s5p_hash_prepare_sgs()
1185 ctx->sg_len = n; in s5p_hash_prepare_sgs()
1186 sg_init_table(ctx->sgl, 2); in s5p_hash_prepare_sgs()
1187 sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt); in s5p_hash_prepare_sgs()
1188 sg_chain(ctx->sgl, 2, sg); in s5p_hash_prepare_sgs()
1189 ctx->sg = ctx->sgl; in s5p_hash_prepare_sgs()
1190 ctx->sg_len++; in s5p_hash_prepare_sgs()
1192 ctx->sg = sg; in s5p_hash_prepare_sgs()
1193 ctx->sg_len = n; in s5p_hash_prepare_sgs()
1200 * s5p_hash_prepare_request() - prepare request for processing
1206 * either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN or
1212 bool final = ctx->finup; in s5p_hash_prepare_request()
1217 nbytes = req->nbytes; in s5p_hash_prepare_request()
1221 ctx->total = nbytes + ctx->bufcnt; in s5p_hash_prepare_request()
1222 if (!ctx->total) in s5p_hash_prepare_request()
1225 if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) { in s5p_hash_prepare_request()
1227 int len = BUFLEN - ctx->bufcnt % BUFLEN; in s5p_hash_prepare_request()
1232 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, in s5p_hash_prepare_request()
1234 ctx->bufcnt += len; in s5p_hash_prepare_request()
1235 nbytes -= len; in s5p_hash_prepare_request()
1236 ctx->skip = len; in s5p_hash_prepare_request()
1238 ctx->skip = 0; in s5p_hash_prepare_request()
1241 if (ctx->bufcnt) in s5p_hash_prepare_request()
1242 memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt); in s5p_hash_prepare_request()
1244 xmit_len = ctx->total; in s5p_hash_prepare_request()
1249 xmit_len -= BUFLEN; in s5p_hash_prepare_request()
1251 xmit_len -= xmit_len & (BUFLEN - 1); in s5p_hash_prepare_request()
1253 hash_later = ctx->total - xmit_len; in s5p_hash_prepare_request()
1254 /* copy hash_later bytes from end of req->src */ in s5p_hash_prepare_request()
1256 scatterwalk_map_and_copy(ctx->buffer, req->src, in s5p_hash_prepare_request()
1257 req->nbytes - hash_later, in s5p_hash_prepare_request()
1262 ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later, in s5p_hash_prepare_request()
1268 if (unlikely(!ctx->bufcnt)) { in s5p_hash_prepare_request()
1270 scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src, in s5p_hash_prepare_request()
1274 sg_init_table(ctx->sgl, 1); in s5p_hash_prepare_request()
1275 sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len); in s5p_hash_prepare_request()
1277 ctx->sg = ctx->sgl; in s5p_hash_prepare_request()
1278 ctx->sg_len = 1; in s5p_hash_prepare_request()
1281 ctx->bufcnt = hash_later; in s5p_hash_prepare_request()
1283 ctx->total = xmit_len; in s5p_hash_prepare_request()
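To make the xmit_len/hash_later split above concrete (assuming BUFLEN is the 64-byte hash block size): for a non-final update with ctx->total = 200, xmit_len = 200 - (200 & 63) = 192 and hash_later = 8, so 8 bytes stay in ctx->buffer for the next op; if ctx->total is already aligned, say 192, xmit_len drops by a whole BUFLEN to 128 and hash_later = 64, so the engine is never handed the last block before the final op.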
1289 * s5p_hash_update_dma_stop() - unmap DMA
1292 * Unmap scatterlist ctx->sg.
1296 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req); in s5p_hash_update_dma_stop()
1298 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE); in s5p_hash_update_dma_stop()
1299 clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags); in s5p_hash_update_dma_stop()
1303 * s5p_hash_finish() - copy calculated digest to crypto layer
1309 struct s5p_aes_dev *dd = ctx->dd; in s5p_hash_finish()
1311 if (ctx->digcnt) in s5p_hash_finish()
1314 dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt); in s5p_hash_finish()
1318 * s5p_hash_finish_req() - finish request
1325 struct s5p_aes_dev *dd = ctx->dd; in s5p_hash_finish_req()
1328 if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags)) in s5p_hash_finish_req()
1329 free_pages((unsigned long)sg_virt(ctx->sg), in s5p_hash_finish_req()
1330 get_order(ctx->sg->length)); in s5p_hash_finish_req()
1332 if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags)) in s5p_hash_finish_req()
1333 kfree(ctx->sg); in s5p_hash_finish_req()
1335 ctx->sg = NULL; in s5p_hash_finish_req()
1336 dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) | in s5p_hash_finish_req()
1339 if (!err && !ctx->error) { in s5p_hash_finish_req()
1341 if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags)) in s5p_hash_finish_req()
1344 ctx->error = true; in s5p_hash_finish_req()
1347 spin_lock_irqsave(&dd->hash_lock, flags); in s5p_hash_finish_req()
1348 dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) | in s5p_hash_finish_req()
1351 spin_unlock_irqrestore(&dd->hash_lock, flags); in s5p_hash_finish_req()
1353 if (req->base.complete) in s5p_hash_finish_req()
1358 * s5p_hash_handle_queue() - handle hash queue
1362 * If req != NULL, enqueue it on dd->hash_queue; if HASH_FLAGS_BUSY is not set
1363 * on the device, then process the first request from dd->hash_queue
1376 spin_lock_irqsave(&dd->hash_lock, flags); in s5p_hash_handle_queue()
1378 ret = ahash_enqueue_request(&dd->hash_queue, req); in s5p_hash_handle_queue()
1380 if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) { in s5p_hash_handle_queue()
1381 spin_unlock_irqrestore(&dd->hash_lock, flags); in s5p_hash_handle_queue()
1385 backlog = crypto_get_backlog(&dd->hash_queue); in s5p_hash_handle_queue()
1386 async_req = crypto_dequeue_request(&dd->hash_queue); in s5p_hash_handle_queue()
1388 set_bit(HASH_FLAGS_BUSY, &dd->hash_flags); in s5p_hash_handle_queue()
1390 spin_unlock_irqrestore(&dd->hash_lock, flags); in s5p_hash_handle_queue()
1396 crypto_request_complete(backlog, -EINPROGRESS); in s5p_hash_handle_queue()
1399 dd->hash_req = req; in s5p_hash_handle_queue()
1402 err = s5p_hash_prepare_request(req, ctx->op_update); in s5p_hash_handle_queue()
1403 if (err || !ctx->total) in s5p_hash_handle_queue()
1406 dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n", in s5p_hash_handle_queue()
1407 ctx->op_update, req->nbytes); in s5p_hash_handle_queue()
1410 if (ctx->digcnt) in s5p_hash_handle_queue()
1413 if (ctx->op_update) { /* HASH_OP_UPDATE */ in s5p_hash_handle_queue()
1414 err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup); in s5p_hash_handle_queue()
1415 if (err != -EINPROGRESS && ctx->finup && !ctx->error) in s5p_hash_handle_queue()
1417 err = s5p_hash_xmit_dma(dd, ctx->total, true); in s5p_hash_handle_queue()
1419 err = s5p_hash_xmit_dma(dd, ctx->total, true); in s5p_hash_handle_queue()
1422 if (err != -EINPROGRESS) { in s5p_hash_handle_queue()
1438 * s5p_hash_tasklet_cb() - hash tasklet
1445 if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) { in s5p_hash_tasklet_cb()
1450 if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) { in s5p_hash_tasklet_cb()
1452 &dd->hash_flags)) { in s5p_hash_tasklet_cb()
1457 &dd->hash_flags)) { in s5p_hash_tasklet_cb()
1458 /* hash or semi-hash ready */ in s5p_hash_tasklet_cb()
1459 clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags); in s5p_hash_tasklet_cb()
1468 s5p_hash_finish_req(dd->hash_req, 0); in s5p_hash_tasklet_cb()
1471 if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) in s5p_hash_tasklet_cb()
1476 * s5p_hash_enqueue() - enqueue request
1485 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); in s5p_hash_enqueue()
1487 ctx->op_update = op; in s5p_hash_enqueue()
1489 return s5p_hash_handle_queue(tctx->dd, req); in s5p_hash_enqueue()
1493 * s5p_hash_update() - process the hash input data
1505 if (!req->nbytes) in s5p_hash_update()
1508 if (ctx->bufcnt + req->nbytes <= BUFLEN) { in s5p_hash_update()
1509 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, in s5p_hash_update()
1510 0, req->nbytes, 0); in s5p_hash_update()
1511 ctx->bufcnt += req->nbytes; in s5p_hash_update()
1519 * s5p_hash_final() - close up hash and calculate digest
1522 * Note: in final, req->src does not contain any data, and req->nbytes can be
1523 * non-zero.
1531 * previous update op, so there are always some buffered bytes in ctx->buffer,
1532 * which means that ctx->bufcnt!=0
1536 * -EINPROGRESS if the operation has been queued for later execution or is set
1538 * -EBUSY if queue is full and request should be resubmitted later,
1545 ctx->finup = true; in s5p_hash_final()
1546 if (ctx->error) in s5p_hash_final()
1547 return -EINVAL; /* uncompleted hash is not needed */ in s5p_hash_final()
1549 if (!ctx->digcnt && ctx->bufcnt < BUFLEN) { in s5p_hash_final()
1550 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); in s5p_hash_final()
1552 return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer, in s5p_hash_final()
1553 ctx->bufcnt, req->result); in s5p_hash_final()
1560 * s5p_hash_finup() - process last req->src and calculate digest
1570 ctx->finup = true; in s5p_hash_finup()
1573 if (err1 == -EINPROGRESS || err1 == -EBUSY) in s5p_hash_finup()
1587 * s5p_hash_init() - initialize AHASH request context
1598 ctx->dd = tctx->dd; in s5p_hash_init()
1599 ctx->error = false; in s5p_hash_init()
1600 ctx->finup = false; in s5p_hash_init()
1601 ctx->bufcnt = 0; in s5p_hash_init()
1602 ctx->digcnt = 0; in s5p_hash_init()
1603 ctx->total = 0; in s5p_hash_init()
1604 ctx->skip = 0; in s5p_hash_init()
1606 dev_dbg(tctx->dd->dev, "init: digest size: %d\n", in s5p_hash_init()
1611 ctx->engine = SSS_HASH_ENGINE_MD5; in s5p_hash_init()
1612 ctx->nregs = HASH_MD5_MAX_REG; in s5p_hash_init()
1615 ctx->engine = SSS_HASH_ENGINE_SHA1; in s5p_hash_init()
1616 ctx->nregs = HASH_SHA1_MAX_REG; in s5p_hash_init()
1619 ctx->engine = SSS_HASH_ENGINE_SHA256; in s5p_hash_init()
1620 ctx->nregs = HASH_SHA256_MAX_REG; in s5p_hash_init()
1623 ctx->error = true; in s5p_hash_init()
1624 return -EINVAL; in s5p_hash_init()
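For context, the init/update/final handlers above are reached through the generic ahash API. A minimal sketch of a kernel caller driving a synchronous SHA-256 hash (needs <crypto/hash.h> and <crypto/sha2.h>; data, len and the trimmed error handling are illustrative, and the crypto core may or may not route "sha256" to this driver):

	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
	struct ahash_request *req;
	struct scatterlist sg;
	u8 digest[SHA256_DIGEST_SIZE];
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	sg_init_one(&sg, data, len);			/* data/len come from the caller */
	ahash_request_set_crypt(req, &sg, digest, len);

	err = crypto_wait_req(crypto_ahash_init(req), &wait);
	if (!err)
		err = crypto_wait_req(crypto_ahash_update(req), &wait);
	if (!err)
		err = crypto_wait_req(crypto_ahash_final(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);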
1631 * s5p_hash_digest - calculate digest from req->src
1642 * s5p_hash_cra_init_alg - init crypto alg transformation
1650 tctx->dd = s5p_dev; in s5p_hash_cra_init_alg()
1652 tctx->fallback = crypto_alloc_shash(alg_name, 0, in s5p_hash_cra_init_alg()
1654 if (IS_ERR(tctx->fallback)) { in s5p_hash_cra_init_alg()
1656 return PTR_ERR(tctx->fallback); in s5p_hash_cra_init_alg()
1666 * s5p_hash_cra_init - init crypto tfm
1675 * s5p_hash_cra_exit - exit crypto tfm
1684 crypto_free_shash(tctx->fallback); in s5p_hash_cra_exit()
1685 tctx->fallback = NULL; in s5p_hash_cra_exit()
1689 * s5p_hash_export - export hash state
1697 memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt); in s5p_hash_export()
1703 * s5p_hash_import - import hash state
1715 if (ctx_in->bufcnt > BUFLEN) { in s5p_hash_import()
1716 ctx->error = true; in s5p_hash_import()
1717 return -EINVAL; in s5p_hash_import()
1720 ctx->dd = tctx->dd; in s5p_hash_import()
1721 ctx->error = false; in s5p_hash_import()
1739 .cra_driver_name = "exynos-sha1",
1763 .cra_driver_name = "exynos-md5",
1787 .cra_driver_name = "exynos-sha256",
1809 memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, in s5p_set_aes()
1813 memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, in s5p_set_aes()
1817 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0); in s5p_set_aes()
1819 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2); in s5p_set_aes()
1821 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4); in s5p_set_aes()
1829 if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE)) in s5p_is_sg_aligned()
1843 dev->sg_src_cpy = NULL; in s5p_set_indata_start()
1844 sg = req->src; in s5p_set_indata_start()
1846 dev_dbg(dev->dev, in s5p_set_indata_start()
1848 err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy); in s5p_set_indata_start()
1852 sg = dev->sg_src_cpy; in s5p_set_indata_start()
1857 s5p_free_sg_cpy(dev, &dev->sg_src_cpy); in s5p_set_indata_start()
1870 dev->sg_dst_cpy = NULL; in s5p_set_outdata_start()
1871 sg = req->dst; in s5p_set_outdata_start()
1873 dev_dbg(dev->dev, in s5p_set_outdata_start()
1875 err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy); in s5p_set_outdata_start()
1879 sg = dev->sg_dst_cpy; in s5p_set_outdata_start()
1884 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy); in s5p_set_outdata_start()
1893 struct skcipher_request *req = dev->req; in s5p_aes_crypt_start()
1899 /* This sets bit [13:12] to 00, which selects 128-bit counter */ in s5p_aes_crypt_start()
1906 iv = req->iv; in s5p_aes_crypt_start()
1911 ctr = req->iv; in s5p_aes_crypt_start()
1917 if (dev->ctx->keylen == AES_KEYSIZE_192) in s5p_aes_crypt_start()
1919 else if (dev->ctx->keylen == AES_KEYSIZE_256) in s5p_aes_crypt_start()
1931 spin_lock_irqsave(&dev->lock, flags); in s5p_aes_crypt_start()
1946 s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen); in s5p_aes_crypt_start()
1948 s5p_set_dma_indata(dev, dev->sg_src); in s5p_aes_crypt_start()
1949 s5p_set_dma_outdata(dev, dev->sg_dst); in s5p_aes_crypt_start()
1954 spin_unlock_irqrestore(&dev->lock, flags); in s5p_aes_crypt_start()
1963 dev->busy = false; in s5p_aes_crypt_start()
1964 spin_unlock_irqrestore(&dev->lock, flags); in s5p_aes_crypt_start()
1975 spin_lock_irqsave(&dev->lock, flags); in s5p_tasklet_cb()
1976 backlog = crypto_get_backlog(&dev->queue); in s5p_tasklet_cb()
1977 async_req = crypto_dequeue_request(&dev->queue); in s5p_tasklet_cb()
1980 dev->busy = false; in s5p_tasklet_cb()
1981 spin_unlock_irqrestore(&dev->lock, flags); in s5p_tasklet_cb()
1984 spin_unlock_irqrestore(&dev->lock, flags); in s5p_tasklet_cb()
1987 crypto_request_complete(backlog, -EINPROGRESS); in s5p_tasklet_cb()
1989 dev->req = skcipher_request_cast(async_req); in s5p_tasklet_cb()
1990 dev->ctx = crypto_tfm_ctx(dev->req->base.tfm); in s5p_tasklet_cb()
1991 reqctx = skcipher_request_ctx(dev->req); in s5p_tasklet_cb()
1993 s5p_aes_crypt_start(dev, reqctx->mode); in s5p_tasklet_cb()
2002 spin_lock_irqsave(&dev->lock, flags); in s5p_aes_handle_req()
2003 err = crypto_enqueue_request(&dev->queue, &req->base); in s5p_aes_handle_req()
2004 if (dev->busy) { in s5p_aes_handle_req()
2005 spin_unlock_irqrestore(&dev->lock, flags); in s5p_aes_handle_req()
2008 dev->busy = true; in s5p_aes_handle_req()
2010 spin_unlock_irqrestore(&dev->lock, flags); in s5p_aes_handle_req()
2012 tasklet_schedule(&dev->tasklet); in s5p_aes_handle_req()
2022 struct s5p_aes_dev *dev = ctx->dev; in s5p_aes_crypt()
2024 if (!req->cryptlen) in s5p_aes_crypt()
2027 if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE) && in s5p_aes_crypt()
2029 dev_dbg(dev->dev, "request size is not exact amount of AES blocks\n"); in s5p_aes_crypt()
2030 return -EINVAL; in s5p_aes_crypt()
2033 reqctx->mode = mode; in s5p_aes_crypt()
2047 return -EINVAL; in s5p_aes_setkey()
2049 memcpy(ctx->aes_key, key, keylen); in s5p_aes_setkey()
2050 ctx->keylen = keylen; in s5p_aes_setkey()
2084 ctx->dev = s5p_dev; in s5p_aes_init_tfm()
2093 .base.cra_driver_name = "ecb-aes-s5p",
2111 .base.cra_driver_name = "cbc-aes-s5p",
2130 .base.cra_driver_name = "ctr-aes-s5p",
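The three skcipher registrations above are consumed through the generic skcipher API in the same way. A minimal sketch of a synchronous "cbc(aes)" encryption (needs <crypto/skcipher.h> and <crypto/aes.h>; key, buf and buflen are caller-supplied placeholders, error handling is trimmed, and buflen must be a multiple of AES_BLOCK_SIZE as checked in s5p_aes_crypt()):

	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	struct skcipher_request *req;
	struct scatterlist sg;
	u8 iv[AES_BLOCK_SIZE];
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	get_random_bytes(iv, sizeof(iv));
	sg_init_one(&sg, buf, buflen);
	skcipher_request_set_crypt(req, &sg, &sg, buflen, iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);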
2151 struct device *dev = &pdev->dev; in s5p_aes_probe()
2159 return -EEXIST; in s5p_aes_probe()
2163 return -ENOMEM; in s5p_aes_probe()
2168 return -EINVAL; in s5p_aes_probe()
2178 res->end += 0x300; in s5p_aes_probe()
2179 pdata->use_hash = true; in s5p_aes_probe()
2183 pdata->res = res; in s5p_aes_probe()
2184 pdata->ioaddr = devm_ioremap_resource(dev, res); in s5p_aes_probe()
2185 if (IS_ERR(pdata->ioaddr)) { in s5p_aes_probe()
2186 if (!pdata->use_hash) in s5p_aes_probe()
2187 return PTR_ERR(pdata->ioaddr); in s5p_aes_probe()
2189 res->end -= 0x300; in s5p_aes_probe()
2190 pdata->use_hash = false; in s5p_aes_probe()
2191 pdata->ioaddr = devm_ioremap_resource(dev, res); in s5p_aes_probe()
2192 if (IS_ERR(pdata->ioaddr)) in s5p_aes_probe()
2193 return PTR_ERR(pdata->ioaddr); in s5p_aes_probe()
2196 pdata->clk = devm_clk_get(dev, variant->clk_names[0]); in s5p_aes_probe()
2197 if (IS_ERR(pdata->clk)) in s5p_aes_probe()
2198 return dev_err_probe(dev, PTR_ERR(pdata->clk), in s5p_aes_probe()
2199 "failed to find secss clock %s\n", in s5p_aes_probe()
2200 variant->clk_names[0]); in s5p_aes_probe()
2202 err = clk_prepare_enable(pdata->clk); in s5p_aes_probe()
2204 dev_err(dev, "Enabling clock %s failed, err %d\n", in s5p_aes_probe()
2205 variant->clk_names[0], err); in s5p_aes_probe()
2209 if (variant->clk_names[1]) { in s5p_aes_probe()
2210 pdata->pclk = devm_clk_get(dev, variant->clk_names[1]); in s5p_aes_probe()
2211 if (IS_ERR(pdata->pclk)) { in s5p_aes_probe()
2212 err = dev_err_probe(dev, PTR_ERR(pdata->pclk), in s5p_aes_probe()
2213 "failed to find clock %s\n", in s5p_aes_probe()
2214 variant->clk_names[1]); in s5p_aes_probe()
2218 err = clk_prepare_enable(pdata->pclk); in s5p_aes_probe()
2220 dev_err(dev, "Enabling clock %s failed, err %d\n", in s5p_aes_probe()
2221 variant->clk_names[1], err); in s5p_aes_probe()
2225 pdata->pclk = NULL; in s5p_aes_probe()
2228 spin_lock_init(&pdata->lock); in s5p_aes_probe()
2229 spin_lock_init(&pdata->hash_lock); in s5p_aes_probe()
2231 pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset; in s5p_aes_probe()
2232 pdata->io_hash_base = pdata->ioaddr + variant->hash_offset; in s5p_aes_probe()
2234 pdata->irq_fc = platform_get_irq(pdev, 0); in s5p_aes_probe()
2235 if (pdata->irq_fc < 0) { in s5p_aes_probe()
2236 err = pdata->irq_fc; in s5p_aes_probe()
2240 err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL, in s5p_aes_probe()
2242 pdev->name, pdev); in s5p_aes_probe()
2248 pdata->busy = false; in s5p_aes_probe()
2249 pdata->dev = dev; in s5p_aes_probe()
2253 tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata); in s5p_aes_probe()
2254 crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN); in s5p_aes_probe()
2262 if (pdata->use_hash) { in s5p_aes_probe()
2263 tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb, in s5p_aes_probe()
2265 crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH); in s5p_aes_probe()
2275 alg->halg.base.cra_driver_name, err); in s5p_aes_probe()
2281 dev_info(dev, "s5p-sss driver registered\n"); in s5p_aes_probe()
2286 for (j = hash_i - 1; j >= 0; j--) in s5p_aes_probe()
2289 tasklet_kill(&pdata->hash_tasklet); in s5p_aes_probe()
2290 res->end -= 0x300; in s5p_aes_probe()
2300 tasklet_kill(&pdata->tasklet); in s5p_aes_probe()
2303 clk_disable_unprepare(pdata->pclk); in s5p_aes_probe()
2306 clk_disable_unprepare(pdata->clk); in s5p_aes_probe()
2320 tasklet_kill(&pdata->tasklet); in s5p_aes_remove()
2321 if (pdata->use_hash) { in s5p_aes_remove()
2322 for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--) in s5p_aes_remove()
2325 pdata->res->end -= 0x300; in s5p_aes_remove()
2326 tasklet_kill(&pdata->hash_tasklet); in s5p_aes_remove()
2327 pdata->use_hash = false; in s5p_aes_remove()
2330 clk_disable_unprepare(pdata->pclk); in s5p_aes_remove()
2332 clk_disable_unprepare(pdata->clk); in s5p_aes_remove()
2340 .name = "s5p-secss",