drivers/crypto/caam/caamprng.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver to expose SEC4 PRNG via crypto RNG API
 *
 * Copyright 2022 NXP
 *
 */

#include <linux/completion.h>
#include <crypto/internal/rng.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

/*
 * Length of used descriptors, see caam_init_prng_desc()
 */
#define CAAM_PRNG_MAX_DESC_LEN (CAAM_CMD_SZ +				\
				CAAM_CMD_SZ +				\
				CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)

/* prng per-device context */
struct caam_prng_ctx {
        int err;
        struct completion done;
};

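/* rng_alg wrapper that also tracks whether the algorithm got registered */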
struct caam_prng_alg {
        struct rng_alg rng;
        bool registered;
};

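/*
 * Job ring completion callback: translate the CAAM status word into an
 * errno via caam_jr_strstatus() and wake up the waiting caller.
 */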
static void caam_prng_done(struct device *jrdev, u32 *desc, u32 err,
                           void *context)
{
        struct caam_prng_ctx *jctx = context;

        jctx->err = err ? caam_jr_strstatus(jrdev, err) : 0;

        complete(&jctx->done);
}

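/*
 * Build the descriptor used by caam_prng_seed() to trigger an RNG reseed
 * (an RNG operation with the AS_FINALIZE state).
 */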
static u32 *caam_init_reseed_desc(u32 *desc)
{
        init_job_desc(desc, 0); /* + 1 cmd_sz */
        /* Reseed the RNG: + 1 cmd_sz */
        append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
                        OP_ALG_AS_FINALIZE);

        print_hex_dump_debug("prng reseed desc@: ", DUMP_PREFIX_ADDRESS,
                             16, 4, desc, desc_bytes(desc), 1);

        return desc;
}

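/*
 * Build a job descriptor that generates @len random bytes and FIFO-stores
 * them to the DMA address @dst_dma.
 */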
static u32 *caam_init_prng_desc(u32 *desc, dma_addr_t dst_dma, u32 len)
{
        init_job_desc(desc, 0); /* + 1 cmd_sz */
        /* Generate random bytes: + 1 cmd_sz */
        append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
        /* Store bytes: + 1 cmd_sz + caam_ptr_sz */
        append_fifo_store(desc, dst_dma,
                          len, FIFOST_TYPE_RNGSTORE);

        print_hex_dump_debug("prng job desc@: ", DUMP_PREFIX_ADDRESS,
                             16, 4, desc, desc_bytes(desc), 1);

        return desc;
}

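/*
 * crypto_rng .generate callback: run a PRNG job on a job ring and copy the
 * result to @dst. A cache-line-aligned bounce buffer is used as the DMA
 * destination; the caller's buffer is only written on success.
 */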
static int caam_prng_generate(struct crypto_rng *tfm,
                              const u8 *src, unsigned int slen,
                              u8 *dst, unsigned int dlen)
{
        unsigned int aligned_dlen = ALIGN(dlen, dma_get_cache_alignment());
        struct caam_prng_ctx ctx;
        struct device *jrdev;
        dma_addr_t dst_dma;
        u32 *desc;
        u8 *buf;
        int ret;

        if (aligned_dlen < dlen)
                return -EOVERFLOW;

        buf = kzalloc(aligned_dlen, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        jrdev = caam_jr_alloc();
        ret = PTR_ERR_OR_ZERO(jrdev);
        if (ret) {
                pr_err("Job Ring Device allocation failed\n");
                kfree(buf);
                return ret;
        }

        desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL);
        if (!desc) {
                ret = -ENOMEM;
                goto out1;
        }

        dst_dma = dma_map_single(jrdev, buf, dlen, DMA_FROM_DEVICE);
        if (dma_mapping_error(jrdev, dst_dma)) {
                dev_err(jrdev, "Failed to map destination buffer memory\n");
                ret = -ENOMEM;
                goto out;
        }

        init_completion(&ctx.done);
        ret = caam_jr_enqueue(jrdev,
                              caam_init_prng_desc(desc, dst_dma, dlen),
                              caam_prng_done, &ctx);

        if (ret == -EINPROGRESS) {
                wait_for_completion(&ctx.done);
                ret = ctx.err;
        }

        dma_unmap_single(jrdev, dst_dma, dlen, DMA_FROM_DEVICE);

        if (!ret)
                memcpy(dst, buf, dlen);
out:
        kfree(desc);
out1:
        caam_jr_free(jrdev);
        kfree(buf);
        return ret;
}

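/* No per-transform state is kept, so init/exit have nothing to do */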
static void caam_prng_exit(struct crypto_tfm *tfm) {}

static int caam_prng_init(struct crypto_tfm *tfm)
{
        return 0;
}

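/*
 * crypto_rng .seed callback: no caller-supplied seed material is accepted
 * (slen must be 0); instead a reseed job is enqueued so the RNG block
 * refreshes its own state.
 */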
static int caam_prng_seed(struct crypto_rng *tfm,
                          const u8 *seed, unsigned int slen)
{
        struct caam_prng_ctx ctx;
        struct device *jrdev;
        u32 *desc;
        int ret;

        if (slen) {
                pr_err("Seed length should be zero\n");
                return -EINVAL;
        }

        jrdev = caam_jr_alloc();
        ret = PTR_ERR_OR_ZERO(jrdev);
        if (ret) {
                pr_err("Job Ring Device allocation failed\n");
                return ret;
        }

        desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL);
        if (!desc) {
                caam_jr_free(jrdev);
                return -ENOMEM;
        }

        init_completion(&ctx.done);
        ret = caam_jr_enqueue(jrdev,
                              caam_init_reseed_desc(desc),
                              caam_prng_done, &ctx);

        if (ret == -EINPROGRESS) {
                wait_for_completion(&ctx.done);
                ret = ctx.err;
        }

        kfree(desc);
        caam_jr_free(jrdev);
        return ret;
}

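/*
 * "stdrng" backed by the CAAM PRNG; seedsize is 0 because no external seed
 * material is taken (see caam_prng_seed()).
 */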
static struct caam_prng_alg caam_prng_alg = {
        .rng = {
                .generate = caam_prng_generate,
                .seed = caam_prng_seed,
                .seedsize = 0,
                .base = {
                        .cra_name = "stdrng",
                        .cra_driver_name = "prng-caam",
                        .cra_priority = 500,
                        .cra_ctxsize = sizeof(struct caam_prng_ctx),
                        .cra_module = THIS_MODULE,
                        .cra_init = caam_prng_init,
                        .cra_exit = caam_prng_exit,
                },
        }
};

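/* Unregister the rng algorithm only if registration actually succeeded */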
void caam_prng_unregister(void *data)
{
        if (caam_prng_alg.registered)
                crypto_unregister_rng(&caam_prng_alg.rng);
}

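/*
 * Register the "stdrng" algorithm if the CAAM instantiates at least one RNG
 * block; the instance count is read from era-dependent capability registers.
 */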
int caam_prng_register(struct device *ctrldev)
{
        struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
        u32 rng_inst;
        int ret = 0;

        /* Check for available RNG blocks before registration */
        if (priv->era < 10)
                rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
                            CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
        else
                rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK;

        if (!rng_inst) {
                dev_dbg(ctrldev, "RNG block is not available... skipping registering algorithm\n");
                return ret;
        }

        ret = crypto_register_rng(&caam_prng_alg.rng);
        if (ret) {
                dev_err(ctrldev,
                        "couldn't register rng crypto alg: %d\n",
                        ret);
                return ret;
        }

        caam_prng_alg.registered = true;

        dev_info(ctrldev,
                 "rng crypto API alg registered %s\n", caam_prng_alg.rng.base.cra_driver_name);

        return 0;
}