// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;

	if (priv->version == EIP197D_MRVL) {
		cs_rc_max = EIP197D_CS_RC_MAX;
		cs_ht_wc = EIP197D_CS_HT_WC;
		cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
	} else {
		/* Default to minimum "safe" settings */
		cs_rc_max = EIP197B_CS_RC_MAX;
		cs_ht_wc = EIP197B_CS_HT_WC;
		cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
	}

	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking the record cache
	 * into reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Clear all records */
	for (i = 0; i < cs_rc_max; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       htable_offset + i * sizeof(u32));

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}
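
/*
 * Illustrative sketch, not driver code: after eip197_trc_cache_init() the
 * cache records form a doubly linked free chain. For a hypothetical
 * cs_rc_max of 4, the second administration word of each record holds:
 *
 *	record 0: prev = NULL, next = 1
 *	record 1: prev = 0,    next = 2
 *	record 2: prev = 1,    next = 3
 *	record 3: prev = 2,    next = NULL
 *
 * which is why EIP197_TRC_FREECHAIN above is programmed with a head pointer
 * of 0 and a tail pointer of cs_rc_max - 1.
 */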

static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw, int pe, u32 ctrl,
				  u32 prog_en)
{
	const u32 *data = (const u32 *)fw->data;
	u32 val;
	int i;

	/* Reset the engine to make its program memory accessible */
	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
	       EIP197_PE(priv) + ctrl);

	/* Enable access to the program memory */
	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Disable access to the program memory */
	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Release the engine from reset */
	val = readl(EIP197_PE(priv) + ctrl);
	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
	writel(val, EIP197_PE(priv) + ctrl);
}

static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[31], *dir = NULL;
	int i, j, ret = 0, pe;
	u32 val;

	if (priv->version == EIP197D_MRVL)
		dir = "eip197d";
	else if (priv->version == EIP197B_MRVL ||
		 priv->version == EIP197_DEVBRD)
		dir = "eip197b";
	else
		return -ENODEV;

	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = request_firmware(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (priv->version != EIP197B_MRVL)
				goto release_fw;

			/* Fallback to the old firmware location for the
			 * EIP197b.
			 */
			ret = request_firmware(&fw[i], fw_name[i], priv->dev);
			if (ret) {
				dev_err(priv->dev,
					"Failed to request firmware %s (%d)\n",
					fw_name[i], ret);
				goto release_fw;
			}
		}
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Clear the scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
			  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

		eip197_write_firmware(priv, fw[FW_IFPP], pe,
				      EIP197_PE_ICE_FPP_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

		eip197_write_firmware(priv, fw[FW_IPUE], pe,
				      EIP197_PE_ICE_PUE_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
	}

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	return ret;
}
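
/*
 * Note on the path built above: request_firmware() resolves it relative to
 * the system firmware directory, typically /lib/firmware, so the images are
 * expected at e.g. /lib/firmware/inside-secure/eip197d/ifpp.bin. A minimal
 * sketch of the request/release pairing used above, assuming a hypothetical
 * image name:
 *
 *	const struct firmware *fw;
 *
 *	if (!request_firmware(&fw, "example.bin", dev)) {
 *		... use fw->data / fw->size ...
 *		release_firmware(fw);
 *	}
 */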

static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, cd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}
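
/*
 * Worked example for the fetch configuration above (illustrative numbers,
 * not datasheet values): with a host data width of 2^hdw = 4 words (hdw = 2)
 * and cd_size = 6 words, cd_size_rnd = (6 + 3) >> 2 = 2, so the high half of
 * EIP197_HIA_xDR_CFG is programmed to fetch EIP197_FETCH_COUNT descriptors
 * of 2 << 2 = 8 words each, i.e. each slot rounded up to whole bus words.
 */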

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, rd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}

static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 version, val;
	int i, ret, pe;

	dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
		priv->config.pes, priv->config.rings);

	/* Determine endianness and configure byte swap */
	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
		val |= EIP197_MST_CTRL_BYTE_SWAP;
	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

	/*
	 * For EIP197s only, set the maximum number of TX commands to 2^5 = 32.
	 * Skip this for the EIP97 as it does not have this field.
	 */
	if (priv->version != EIP97IES_MRVL)
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);

	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->version != EIP97IES_MRVL)
			/* Reset HIA input interface arbiter (EIP197 only) */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Take the DFE threads out of reset */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->version != EIP97IES_MRVL)
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for the EIP97, but
		 * disabling this also impacts performance.
		 */
		if (priv->version != EIP97IES_MRVL)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Take the DSE threads out of reset */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* Token & context configuration */
		val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
		      EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX |
		      EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));

		/* H/W capabilities selection: just enable everything */
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->version != EIP97IES_MRVL) {
		eip197_trc_cache_init(priv);

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}
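
/*
 * Illustrative note: the EIP197_HIA_xDR_THRESH write above implements
 * interrupt coalescing. If, say, 10 requests are pending and
 * EIP197_MAX_BATCH_SZ allows it, coal = 10 and the RDR raises a single
 * threshold interrupt once 10 result packets have been processed, instead
 * of one interrupt per packet.
 */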

void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of
	 * resources, process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
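
/*
 * For context, a sketch of the producer side feeding safexcel_dequeue()
 * (the actual enqueue paths live in the cipher and hash files; this is an
 * approximation, not a verbatim copy):
 *
 *	spin_lock_bh(&priv->ring[ring].queue_lock);
 *	ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
 *	spin_unlock_bh(&priv->ring[ring].queue_lock);
 *
 *	queue_work(priv->ring[ring].workqueue,
 *		   &priv->ring[ring].work_data.work);
 *
 * The worker then calls safexcel_dequeue(), which builds descriptors and
 * advertises them to the engine via the xDR_PREP_COUNT writes above.
 */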

inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       struct safexcel_result_desc *rdesc)
{
	if (likely((!rdesc->descriptor_overflow) &&
		   (!rdesc->buffer_overflow) &&
		   (!rdesc->result_data.error_code)))
		return 0;

	if (rdesc->descriptor_overflow)
		dev_err(priv->dev, "Descriptor overflow detected");

	if (rdesc->buffer_overflow)
		dev_err(priv->dev, "Buffer overflow detected");

	if (rdesc->result_data.error_code & 0x4066) {
		/* Fatal error (bits 1, 2, 5, 6 & 14) */
		dev_err(priv->dev,
			"result descriptor error (%x)",
			rdesc->result_data.error_code);
		return -EIO;
	} else if (rdesc->result_data.error_code &
		   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
		/*
		 * Give priority over authentication fails:
		 * blocksize, length & overflow errors,
		 * something wrong with the input!
		 */
		return -EINVAL;
	} else if (rdesc->result_data.error_code & BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}
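
/*
 * Worked example for the masks above: 0x4066 = BIT(14) | BIT(6) | BIT(5) |
 * BIT(2) | BIT(1), i.e. exactly the fatal bits 1, 2, 5, 6 and 14 named in
 * the comment. And because the input-error bits are tested before BIT(9),
 * an error_code of BIT(9) | BIT(0) is reported as -EINVAL (bad input)
 * rather than -EBADMSG (authentication failure).
 */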

inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}

static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)\n",
				ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i)
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

	/* If the number of requests overflowed the counter, try to process
	 * more requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}
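
/*
 * Illustrative note: the packet count extracted from
 * EIP197_HIA_xDR_PROC_COUNT above is a saturating hardware field that is
 * EIP197_xDR_PROC_xD_PKT_MASK wide. When more packets completed than the
 * field can represent, nreq reads back as the mask value, so the function
 * loops (goto handle_results) and re-reads the register until the count
 * drops below saturation.
 */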

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.\n");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}
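
/*
 * Interrupt flow sketch: safexcel_irq_ring() is the hard IRQ half and only
 * classifies and acknowledges status bits, returning IRQ_WAKE_THREAD on a
 * threshold hit. safexcel_irq_ring_thread() then runs in process context,
 * completes the finished requests and kicks the per-ring workqueue so any
 * backlogged requests get dequeued. Both halves are wired up through the
 * devm_request_threaded_irq() call below.
 */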

static int safexcel_request_ring_irq(void *pdev, int irqid,
				     int is_pci_dev,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq;
	struct device *dev;

	if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
		struct pci_dev *pci_pdev = pdev;

		dev = &pci_pdev->dev;
		irq = pci_irq_vector(pci_pdev, irqid);
		if (irq < 0) {
			dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
				irqid, irq);
			return irq;
		}
	} else if (IS_ENABLED(CONFIG_OF)) {
		struct platform_device *plf_pdev = pdev;
		char irq_name[6] = {0}; /* "ringX\0" */

		snprintf(irq_name, 6, "ring%d", irqid);
		dev = &plf_pdev->dev;
		irq = platform_get_irq_byname(plf_pdev, irq_name);
		if (irq < 0) {
			dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
				irq_name, irq);
			return irq;
		}
	} else {
		return -ENXIO;
	}

	ret = devm_request_threaded_irq(dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(dev), ring_irq_priv);
	if (ret) {
		dev_err(dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}
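
/*
 * Platform case sketch: the "ring%d" names built above are resolved against
 * the device tree node, which is expected to carry matching entries along
 * the lines of (illustrative only, see the DT binding for the authoritative
 * list):
 *
 *	interrupt-names = "ring0", "ring1", "ring2", "ring3";
 */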

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_ctr_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
	&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha1_ctr_aes,
	&safexcel_alg_authenc_hmac_sha224_ctr_aes,
	&safexcel_alg_authenc_hmac_sha256_ctr_aes,
	&safexcel_alg_authenc_hmac_sha384_ctr_aes,
	&safexcel_alg_authenc_hmac_sha512_ctr_aes,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}

static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask = 0;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);

	/* Read number of PEs from the engine */
	if (priv->version == EIP97IES_MRVL)
		/* Narrow field width for EIP97 type engine */
		mask = EIP97_N_PES_MASK;
	else
		/* Wider field width for all EIP197 type engines */
		mask = EIP197_N_PES_MASK;

	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;

	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}
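
/*
 * Worked example for the alignment math above (illustrative sizes): with an
 * HDW options field of 2, mask = BIT(2) - 1 = 3, so a 6-word descriptor is
 * padded to an 8-word slot: (6 + 3) & ~3 = 8. cd_offset and rd_offset are
 * therefore the descriptor strides in 32-bit words, and they are what the
 * ring size and counter register writes elsewhere in this file scale by.
 */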

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->version == EIP97IES_MRVL) {
		offsets->hia_aic = EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
		offsets->pe = EIP97_PE_BASE;
	} else {
		offsets->hia_aic = EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
		offsets->pe = EIP197_PE_BASE;
	}
}

/*
 * Generic part of probe routine, shared by platform and PCI driver
 *
 * Assumes IO resources have been mapped, private data mem has been allocated,
 * clocks have been enabled, device pointer has been assigned etc.
 */
static int safexcel_probe_generic(void *pdev,
				  struct safexcel_crypto_priv *priv,
				  int is_pci_dev)
{
	struct device *dev = priv->dev;
	int i, ret;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool)
		return -ENOMEM;

	safexcel_init_register_offsets(priv);

	if (priv->version != EIP97IES_MRVL)
		priv->flags |= EIP197_TRC_CACHE;

	safexcel_configure(priv);

	if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
		/*
		 * Request MSI vectors for global + 1 per ring -
		 * or just 1 for older dev images
		 */
		struct pci_dev *pci_pdev = pdev;

		ret = pci_alloc_irq_vectors(pci_pdev,
					    priv->config.rings + 1,
					    priv->config.rings + 1,
					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (ret < 0) {
			dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
			return ret;
		}
	}

	/* Register the ring IRQ handlers and configure the rings */
	priv->ring = devm_kcalloc(dev, priv->config.rings,
				  sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	for (i = 0; i < priv->config.rings; i++) {
		char wq_name[9] = {0};
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret) {
			dev_err(dev, "Failed to initialize rings\n");
			return ret;
		}

		priv->ring[i].rdr_req = devm_kcalloc(dev,
						     EIP197_DEFAULT_RING_SIZE,
						     sizeof(priv->ring[i].rdr_req),
						     GFP_KERNEL);
		if (!priv->ring[i].rdr_req)
			return -ENOMEM;

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq)
			return -ENOMEM;

		ring_irq->priv = priv;
		ring_irq->ring = i;

		irq = safexcel_request_ring_irq(pdev,
						EIP197_IRQ_NUMBER(i, is_pci_dev),
						is_pci_dev,
						safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
			return irq;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work,
			  safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue =
			create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue)
			return -ENOMEM;

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "HW init failed (%d)\n", ret);
		return ret;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}

#if IS_ENABLED(CONFIG_OF)
/* for Device Tree platform driver */

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	platform_set_drvdata(pdev, priv);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	/* Generic EIP97/EIP197 device probing */
	ret = safexcel_probe_generic(pdev, priv, 0);
	if (ret)
		goto err_reg_clk;

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D_MRVL,
	},
	/* For backward compatibility and intended for generic use */
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B_MRVL,
	},
	{},
};
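
/*
 * A minimal device tree consumer of the table above might look like this
 * (sketch only; the address, size and interrupt list are made up, see the
 * DT binding for the authoritative description):
 *
 *	crypto@800000 {
 *		compatible = "inside-secure,safexcel-eip197b";
 *		reg = <0x800000 0x200000>;
 *		interrupt-names = "ring0", "ring1", "ring2", "ring3";
 *	};
 */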

static struct platform_driver crypto_safexcel = {
	.probe = safexcel_probe,
	.remove = safexcel_remove,
	.driver = {
		.name = "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
#endif

#if IS_ENABLED(CONFIG_PCI)
/* PCIE devices - i.e. Inside Secure development boards */

static int safexcel_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	void __iomem *pciebase;
	int rc;
	u32 val;

	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
		ent->vendor, ent->device, ent->subvendor,
		ent->subdevice, ent->driver_data);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)ent->driver_data;

	pci_set_drvdata(pdev, priv);

	/* enable the device */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(dev, "Failed to enable PCI device\n");
		return rc;
	}

	/* take ownership of PCI BAR0 */
	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
	if (rc) {
		dev_err(dev, "Failed to map IO region for BAR0\n");
		return rc;
	}
	priv->base = pcim_iomap_table(pdev)[0];

	if (priv->version == EIP197_DEVBRD) {
		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");

		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
		if (rc) {
			dev_err(dev, "Failed to map IO region for BAR4\n");
			return rc;
		}

		pciebase = pcim_iomap_table(pdev)[2];
		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
				(val & 0xff));

			/* Setup MSI identity map mapping */
			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);

			/* Enable all device interrupts */
			writel(GENMASK(31, 0),
			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
		} else {
			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
				val);
			return -ENODEV;
		}

		/* HW reset FPGA dev board */
		/* assert reset */
		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
		/* deassert reset */
		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
	}

	/* enable bus mastering */
	pci_set_master(pdev);

	/* Generic EIP97/EIP197 device probing */
	rc = safexcel_probe_generic(pdev, priv, 1);
	return rc;
}

static void safexcel_pci_remove(struct pci_dev *pdev)
{
	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	safexcel_hw_reset_rings(priv);
}

static const struct pci_device_id safexcel_pci_ids[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
			       0x16ae, 0xc522),
		/* assume EIP197B for now */
		.driver_data = EIP197_DEVBRD,
	},
	{},
};

MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);

static struct pci_driver safexcel_pci_driver = {
	.name = "crypto-safexcel",
	.id_table = safexcel_pci_ids,
	.probe = safexcel_pci_probe,
	.remove = safexcel_pci_remove,
};
#endif

static int __init safexcel_init(void)
{
	int rc = 0;

#if IS_ENABLED(CONFIG_OF)
	/* Register platform driver */
	rc = platform_driver_register(&crypto_safexcel);
	if (rc)
		return rc;
#endif

#if IS_ENABLED(CONFIG_PCI)
	/* Register PCI driver */
	rc = pci_register_driver(&safexcel_pci_driver);
#if IS_ENABLED(CONFIG_OF)
	if (rc)
		platform_driver_unregister(&crypto_safexcel);
#endif
#endif

	return rc;
}

static void __exit safexcel_exit(void)
{
#if IS_ENABLED(CONFIG_OF)
	/* Unregister platform driver */
	platform_driver_unregister(&crypto_safexcel);
#endif

#if IS_ENABLED(CONFIG_PCI)
	/* Unregister PCI driver if successfully registered before */
	pci_unregister_driver(&safexcel_pci_driver);
#endif
}

module_init(safexcel_init);
module_exit(safexcel_exit);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");