aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
aes->dst.nents, DMA_FROM_DEVICE);
if (unlikely(!aes->dst.sg_len)) {
- dma_unmap_sg(cryp->dev, aes->src.sg,
- aes->src.nents, DMA_TO_DEVICE);
+ dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
+ DMA_TO_DEVICE);
goto sg_map_err;
}
}
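
For reference, the error path above is the standard scatterlist unwind: if mapping the destination list fails after the source list was already mapped, the source mapping has to be released before taking the error exit. A minimal sketch of that pairing, with illustrative names (mtk_aes_map_sketch is not a driver function):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int mtk_aes_map_sketch(struct device *dev,
			      struct scatterlist *src, int src_nents,
			      struct scatterlist *dst, int dst_nents)
{
	/* dma_map_sg() returns 0 on failure, else the mapped entry count. */
	if (!dma_map_sg(dev, src, src_nents, DMA_TO_DEVICE))
		return -EINVAL;

	if (!dma_map_sg(dev, dst, dst_nents, DMA_FROM_DEVICE)) {
		/* Undo the source mapping before failing. */
		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		return -EINVAL;
	}
	return 0;
}
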
static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
			  const u8 *key, u32 keylen)
{
struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- const u32 *key_tmp = (const u32 *)key;
+ const u32 *aes_key = (const u32 *)key;
u32 *key_state = ctx->tfm.state;
int i;
ctx->keylen = SIZE_IN_WORDS(keylen);
for (i = 0; i < ctx->keylen; i++)
- key_state[i] = cpu_to_le32(key_tmp[i]);
+ key_state[i] = cpu_to_le32(aes_key[i]);
return 0;
}
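
The setkey hunk stores the key into the transform state one 32-bit word at a time. SIZE_IN_WORDS presumably converts a byte count into words; a plausible definition, spelled out here as an assumption, makes the loop self-contained:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Assumed helper; the driver's own SIZE_IN_WORDS may be defined elsewhere. */
#define SIZE_IN_WORDS(x)	((x) >> 2)

static void aes_copy_key_sketch(u32 *state, const u8 *key, u32 keylen)
{
	const u32 *src = (const u32 *)key;
	int i;

	/* Store each key word little-endian, presumably as the engine expects. */
	for (i = 0; i < SIZE_IN_WORDS(keylen); i++)
		state[i] = cpu_to_le32(src[i]);
}
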
rctx = ablkcipher_request_ctx(req);
rctx->mode = mode;
- return mtk_aes_handle_queue(ctx->cryp,
- !(mode & AES_FLAGS_ENCRYPT), &req->base);
+ return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT),
+ &req->base);
}
-static int mtk_ecb_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}
-static int mtk_ecb_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ECB);
}
-static int mtk_cbc_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}
-static int mtk_cbc_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
{
return mtk_aes_crypt(req, AES_FLAGS_CBC);
}
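
All four entry points are thin wrappers that only differ in the flag mask passed to mtk_aes_crypt(): ENCRYPT selects the direction, ECB/CBC the block mode, and decryption is simply the absence of the ENCRYPT bit (hence the !(mode & AES_FLAGS_ENCRYPT) test when queueing). The bit positions below are illustrative assumptions, not the driver's actual values:

#include <linux/bits.h>

/* Hypothetical flag layout for illustration only. */
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_ENCRYPT	BIT(2)

/* Example: CBC decrypt is just AES_FLAGS_CBC with no direction bit set. */
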
static struct crypto_alg aes_algs[] = {
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_cra_init,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .cra_alignmask = 15,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_cbc_encrypt,
- .decrypt = mtk_cbc_decrypt,
- .ivsize = AES_BLOCK_SIZE,
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-mtk",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_init = mtk_aes_cra_init,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_cbc_encrypt,
+ .decrypt = mtk_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
}
},
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_init = mtk_aes_cra_init,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .cra_alignmask = 15,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_ecb_encrypt,
- .decrypt = mtk_ecb_decrypt,
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-mtk",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_init = mtk_aes_cra_init,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mtk_aes_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mtk_aes_setkey,
+ .encrypt = mtk_aes_ecb_encrypt,
+ .decrypt = mtk_aes_ecb_decrypt,
}
},
};
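
Elsewhere in the driver this array is presumably registered in one call; the usual pattern is crypto_register_algs(), which unwinds any entries it already registered if a later one fails. A sketch (the wrapper names are illustrative):

#include <linux/kernel.h>
#include <linux/crypto.h>

static int mtk_aes_register_algs_sketch(void)
{
	return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
}

static void mtk_aes_unregister_algs_sketch(void)
{
	crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
}
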
/*
 * Update input data length field of transform information and
* map it to DMA region.
*/
-static int mtk_sha_info_map(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha,
- size_t len)
+static int mtk_sha_info_update(struct mtk_cryp *cryp,
+ struct mtk_sha_rec *sha,
+ size_t len)
{
struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
struct mtk_sha_info *info = &ctx->info;
ctx->digcnt += len;
ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
return -EINVAL;
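
Note the check style: a dma_map_single() result may only be validated with dma_mapping_error(); comparing the handle against zero is not portable. Every successful mapping also needs an unmap with the same size and direction, roughly:

#include <linux/dma-mapping.h>

static int sha_info_map_sketch(struct device *dev, void *info, size_t len)
{
	dma_addr_t d = dma_map_single(dev, info, len, DMA_BIDIRECTIONAL);

	if (unlikely(dma_mapping_error(dev, d)))
		return -EINVAL;
	/* ... the engine reads and writes the transform info here ... */
	dma_unmap_single(dev, d, len, DMA_BIDIRECTIONAL);
	return 0;
}
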
struct mtk_desc *res = ring->res_base + ring->res_pos;
int err;
- err = mtk_sha_info_map(cryp, sha, len);
+ err = mtk_sha_info_update(cryp, sha, len);
if (err)
return err;
/* Fill in the command/result descriptors */
- res->hdr = MTK_DESC_FIRST |
- MTK_DESC_LAST |
- MTK_DESC_BUF_LEN(len);
-
+ res->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len);
res->buf = cpu_to_le32(cryp->tmp_dma);
- cmd->hdr = MTK_DESC_FIRST |
- MTK_DESC_LAST |
- MTK_DESC_BUF_LEN(len) |
+ cmd->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len) |
MTK_DESC_CT_LEN(ctx->ct_size);
cmd->buf = cpu_to_le32(addr);
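
The descriptor headers are plain bitfield compositions: FIRST/LAST bracket the segments of one request, and the length macros pack byte counts into the header word. The stand-in macros below only illustrate the shape; the real MTK_DESC_* bit positions come from the hardware documentation:

#include <linux/bits.h>

/* Stand-ins for MTK_DESC_*; positions are assumed, not the real layout. */
#define DESC_BUF_LEN(x)		((x) & GENMASK(16, 0))
#define DESC_CT_LEN(x)		(((u32)(x) & 0xff) << 24)
#define DESC_LAST		BIT(22)
#define DESC_FIRST		BIT(23)

/* A single-segment command header, as built above for cmd->hdr. */
static inline u32 desc_hdr_sketch(size_t len, size_t ct_size)
{
	return DESC_FIRST | DESC_LAST | DESC_BUF_LEN(len) |
	       DESC_CT_LEN(ct_size);
}
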
struct mtk_desc *res = ring->res_base + ring->res_pos;
int err;
- err = mtk_sha_info_map(cryp, sha, len1 + len2);
+ err = mtk_sha_info_update(cryp, sha, len1 + len2);
if (err)
return err;
res->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST;
res->buf = cpu_to_le32(cryp->tmp_dma);
- cmd->hdr = MTK_DESC_BUF_LEN(len1) |
- MTK_DESC_FIRST |
+ cmd->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST |
MTK_DESC_CT_LEN(ctx->ct_size);
cmd->buf = cpu_to_le32(sg_dma_address(ctx->sg));
cmd->ct = cpu_to_le32(ctx->ct_dma);
size_t count)
{
ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
- SHA_BUF_SIZE, DMA_TO_DEVICE);
+ SHA_BUF_SIZE, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
dev_err(cryp->dev, "dma map error\n");
return -EINVAL;
mtk_sha_fill_padding(ctx, len);
ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
- SHA_BUF_SIZE, DMA_TO_DEVICE);
+ SHA_BUF_SIZE, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
dev_err(cryp->dev, "dma map bytes error\n");
return -EINVAL;
static int mtk_sha_final_req(struct mtk_cryp *cryp,
struct mtk_sha_rec *sha)
{
- struct ahash_request *req = sha->req;
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
+ struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
size_t count;
mtk_sha_fill_padding(ctx, 0);
}
static void mtk_sha_finish_req(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha, int err)
+ struct mtk_sha_rec *sha,
+ int err)
{
if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
err = mtk_sha_finish(sha->req);
return mtk_sha_init(req) ?: mtk_sha_finup(req);
}
-static int mtk_sha_setkey(struct crypto_ahash *tfm,
- const unsigned char *key, u32 keylen)
+static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
+ u32 keylen)
{
struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
struct mtk_sha_hmac_ctx *bctx = tctx->base;
shash->tfm = bctx->shash;
shash->flags = crypto_shash_get_flags(bctx->shash) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ CRYPTO_TFM_REQ_MAY_SLEEP;
if (keylen > bs) {
err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
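
This is the standard HMAC key preprocessing: a key longer than the hash's block size is first digested down to digest size before the ipad/opad XOR. A standalone sketch of the one-shot digest over the shash API (names are illustrative; kernels of this driver's era also set a desc->flags field, which later kernels removed):

#include <crypto/hash.h>

static int hmac_shorten_key_sketch(struct crypto_shash *tfm, const u8 *key,
				   unsigned int keylen, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	err = crypto_shash_digest(desc, key, keylen, out);
	shash_desc_zero(desc);
	return err;
}
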