#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"
+#include <crypto/engine.h>
/*
* crypto alg
* per-session context
*/
struct caam_ctx {
+ struct crypto_engine_ctx enginectx;
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
unsigned int authsize;
};
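+/*
+ * caam_skcipher_req_ctx - per-request context, placed in the skcipher
+ * request's private area (sized via crypto_skcipher_set_reqsize() in
+ * cra_init). It stashes the extended descriptor so both the crypto-engine
+ * path and the job-ring completion callback can reach it through
+ * skcipher_request_ctx(req) instead of container_of() on the descriptor.
+ */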
+struct caam_skcipher_req_ctx {
+ struct skcipher_edesc *edesc;
+};
+
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
* @mapped_dst_nents: number of segments in output h/w link table
* @iv_dma: dma address of iv for checking continuity and link table
* @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @bklog: true if the request went through the crypto engine (backlog
+ * path); selects the completion path in the done callback
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
int mapped_dst_nents;
dma_addr_t iv_dma;
int sec4_sg_bytes;
+ bool bklog;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
u32 hw_desc[0];
{
struct skcipher_request *req = context;
struct skcipher_edesc *edesc;
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
int ivsize = crypto_skcipher_ivsize(skcipher);
int ecode = 0;
+ bool has_bklog;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
+ edesc = rctx->edesc;
+ has_bklog = edesc->bklog;
if (err)
ecode = caam_jr_strstatus(jrdev, err);
kfree(edesc);
- skcipher_request_complete(req, ecode);
+ /*
+ * If no backlog flag, the completion of the request is done by
+ * CAAM, not crypto engine. The flag is cached in has_bklog before
+ * the kfree() above so that edesc is not read after it is freed.
+ */
+ if (!has_bklog)
+ skcipher_request_complete(req, ecode);
+ else
+ crypto_finalize_skcipher_request(jrp->engine, req, ecode);
}
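+/*
+ * Completion-path note: requests that went through the crypto engine must
+ * be finalized with crypto_finalize_skcipher_request(), which both
+ * completes the request and lets the engine pump the next backlogged
+ * entry; requests enqueued directly on the JR never touched the engine
+ * and are completed with skcipher_request_complete() as before.
+ */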
/*
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
desc_bytes);
+ rctx->edesc = edesc;
/* Make sure IV is located in a DMAable area */
if (ivsize) {
return edesc;
}
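+/*
+ * The IV handling above copies req->iv into the GFP-allocated edesc area
+ * because the caller's IV may live on the stack or in another region that
+ * cannot be DMA mapped safely.
+ */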
+static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+ u32 *desc = rctx->edesc->hw_desc;
+ int ret;
+
+ rctx->edesc->bklog = true;
+
+ ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req);
+
+ if (ret != -EINPROGRESS) {
+ skcipher_unmap(ctx->jrdev, rctx->edesc, req);
+ kfree(rctx->edesc);
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
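+/*
+ * skcipher_do_one_req() runs from the crypto engine's kthread on a request
+ * previously transferred to the engine. Returning 0 when caam_jr_enqueue()
+ * gives -EINPROGRESS is intentional: the job ring now owns the request and
+ * skcipher_crypt_done() finalizes it asynchronously; any other return value
+ * makes the engine complete the request with that error.
+ */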
+
static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
u32 *desc;
int ret = 0;
desc_bytes(edesc->hw_desc), 1);
desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);
+ /*
+ * Only requests with the MAY_BACKLOG flag are sent to the crypto
+ * engine; the others can be handled by CAAM directly, especially
+ * since the JR has up to 1024 entries (far more than the crypto
+ * engine's 10).
+ */
+ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ ret = crypto_transfer_skcipher_request_to_engine(jrpriv->engine,
+ req);
+ else
+ ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);
- if (ret != -EINPROGRESS) {
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
}
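+/*
+ * Caller-side sketch (illustrative, not part of this driver): a user that
+ * opts into backlogging must treat -EBUSY as "request queued", e.g. with
+ * the standard crypto_wait helpers:
+ *
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *
+ *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ *				      CRYPTO_TFM_REQ_MAY_SLEEP,
+ *				      crypto_req_done, &wait);
+ *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+ */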
{
dma_addr_t dma_addr;
struct caam_drv_private *priv;
+ const size_t sh_desc_enc_offset = offsetof(struct caam_ctx,
+ sh_desc_enc);
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
offsetof(struct caam_ctx,
- sh_desc_enc_dma),
+ sh_desc_enc_dma) -
+ sh_desc_enc_offset,
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
ctx->sh_desc_enc_dma = dma_addr;
ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
- sh_desc_dec);
- ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
+ sh_desc_dec) -
+ sh_desc_enc_offset;
+ ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key) -
+ sh_desc_enc_offset;
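+ /*
+ * Layout note: the single DMA mapping now starts at sh_desc_enc instead
+ * of the top of struct caam_ctx, since enginectx (the first member) is
+ * CPU-only state the hardware never reads. Every precomputed bus address
+ * therefore subtracts sh_desc_enc_offset, e.g.:
+ *
+ *	key_dma = dma_addr + offsetof(struct caam_ctx, key)
+ *			   - offsetof(struct caam_ctx, sh_desc_enc)
+ */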
/* copy descriptor header template value */
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
+
+ ctx->enginectx.op.do_one_request = skcipher_do_one_req;
- return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
- false);
+ return caam_init_common(ctx, &caam_alg->caam, false);
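+/*
+ * Note: this relies on struct crypto_engine_ctx being the first member of
+ * struct caam_ctx, since the crypto engine of this vintage fetches it with
+ * crypto_tfm_ctx() on the request's tfm; the reqsize must likewise be set
+ * before any request is allocated against this tfm.
+ */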
static void caam_exit_common(struct caam_ctx *ctx)
{
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
- offsetof(struct caam_ctx, sh_desc_enc_dma),
+ offsetof(struct caam_ctx, sh_desc_enc_dma) -
+ offsetof(struct caam_ctx, sh_desc_enc),
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
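+/*
+ * The unmap length mirrors the map length in caam_init_common():
+ * offsetof(sh_desc_enc_dma) - offsetof(sh_desc_enc) spans the two shared
+ * descriptors and the key, and excludes everything before sh_desc_enc.
+ */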