// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */
9 #include <linux/device.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/firmware.h>
13 #include <linux/interrupt.h>
14 #include <linux/module.h>
15 #include <linux/of_platform.h>
16 #include <linux/of_irq.h>
17 #include <linux/pci.h>
18 #include <linux/platform_device.h>
19 #include <linux/workqueue.h>
21 #include <crypto/internal/aead.h>
22 #include <crypto/internal/hash.h>
23 #include <crypto/internal/skcipher.h>
/* Module parameter: cap on the number of HW rings the driver will use. */
static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
31 static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
33 u32 val, htable_offset;
34 int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
36 if (priv->version == EIP197D_MRVL) {
37 cs_rc_max = EIP197D_CS_RC_MAX;
38 cs_ht_wc = EIP197D_CS_HT_WC;
39 cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
40 cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
42 /* Default to minimum "safe" settings */
43 cs_rc_max = EIP197B_CS_RC_MAX;
44 cs_ht_wc = EIP197B_CS_HT_WC;
45 cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
46 cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
49 /* Enable the record cache memory access */
50 val = readl(priv->base + EIP197_CS_RAM_CTRL);
51 val &= ~EIP197_TRC_ENABLE_MASK;
52 val |= EIP197_TRC_ENABLE_0;
53 writel(val, priv->base + EIP197_CS_RAM_CTRL);
55 /* Clear all ECC errors */
56 writel(0, priv->base + EIP197_TRC_ECCCTRL);
59 * Make sure the cache memory is accessible by taking record cache into
62 val = readl(priv->base + EIP197_TRC_PARAMS);
63 val |= EIP197_TRC_PARAMS_SW_RESET;
64 val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
65 writel(val, priv->base + EIP197_TRC_PARAMS);
67 /* Clear all records */
68 for (i = 0; i < cs_rc_max; i++) {
69 u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
71 writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
72 EIP197_CS_RC_PREV(EIP197_RC_NULL),
75 val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
77 val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
78 else if (i == cs_rc_max - 1)
79 val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
80 writel(val, priv->base + offset + sizeof(u32));
83 /* Clear the hash table entries */
84 htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
85 for (i = 0; i < cs_ht_wc; i++)
86 writel(GENMASK(29, 0),
87 priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));
89 /* Disable the record cache memory access */
90 val = readl(priv->base + EIP197_CS_RAM_CTRL);
91 val &= ~EIP197_TRC_ENABLE_MASK;
92 writel(val, priv->base + EIP197_CS_RAM_CTRL);
94 /* Write head and tail pointers of the record free chain */
95 val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
96 EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
97 writel(val, priv->base + EIP197_TRC_FREECHAIN);
99 /* Configure the record cache #1 */
100 val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
101 EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
102 writel(val, priv->base + EIP197_TRC_PARAMS2);
104 /* Configure the record cache #2 */
105 val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
106 EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
107 EIP197_TRC_PARAMS_HTABLE_SZ(2);
108 writel(val, priv->base + EIP197_TRC_PARAMS);
111 static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
116 for (pe = 0; pe < priv->config.pes; pe++) {
117 /* Configure the token FIFO's */
118 writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
119 writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));
121 /* Clear the ICE scratchpad memory */
122 val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
123 val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
124 EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
125 EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
126 EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
127 writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
129 /* clear the scratchpad RAM using 32 bit writes only */
130 for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
131 writel(0, EIP197_PE(priv) +
132 EIP197_PE_ICE_SCRATCH_RAM(pe) + (i<<2));
134 /* Reset the IFPP engine to make its program mem accessible */
135 writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
136 EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
137 EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
138 EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));
140 /* Reset the IPUE engine to make its program mem accessible */
141 writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
142 EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
143 EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
144 EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
146 /* Enable access to all IFPP program memories */
147 writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
148 EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
153 static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
154 const struct firmware *fw)
156 const u32 *data = (const u32 *)fw->data;
159 /* Write the firmware */
160 for (i = 0; i < fw->size / sizeof(u32); i++)
161 writel(be32_to_cpu(data[i]),
162 priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
164 /* Exclude final 2 NOPs from size */
165 return i - EIP197_FW_TERMINAL_NOPS;
169 * If FW is actual production firmware, then poll for its initialization
170 * to complete and check if it is good for the HW, otherwise just return OK.
172 static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
178 pollofs = EIP197_FW_FPP_READY;
180 pollofs = EIP197_FW_PUE_READY;
182 for (pe = 0; pe < priv->config.pes; pe++) {
183 base = EIP197_PE_ICE_SCRATCH_RAM(pe);
184 pollcnt = EIP197_FW_START_POLLCNT;
186 (readl_relaxed(EIP197_PE(priv) + base +
191 dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
199 static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
200 int ipuesz, int ifppsz, int minifw)
205 for (pe = 0; pe < priv->config.pes; pe++) {
206 /* Disable access to all program memory */
207 writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
209 /* Start IFPP microengines */
213 val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
214 EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
215 EIP197_PE_ICE_UENG_DEBUG_RESET;
216 writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));
218 /* Start IPUE microengines */
222 val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
223 EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
224 EIP197_PE_ICE_UENG_DEBUG_RESET;
225 writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
228 /* For miniFW startup, there is no initialization, so always succeed */
232 /* Wait until all the firmwares have properly started up */
233 if (!poll_fw_ready(priv, 1))
235 if (!poll_fw_ready(priv, 0))
241 static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
243 const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
244 const struct firmware *fw[FW_NB];
245 char fw_path[37], *dir = NULL;
246 int i, j, ret = 0, pe;
247 int ipuesz, ifppsz, minifw = 0;
249 if (priv->version == EIP197D_MRVL)
251 else if (priv->version == EIP197B_MRVL ||
252 priv->version == EIP197_DEVBRD)
258 for (i = 0; i < FW_NB; i++) {
259 snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
260 ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
262 if (minifw || priv->version != EIP197B_MRVL)
265 /* Fallback to the old firmware location for the
268 ret = firmware_request_nowarn(&fw[i], fw_name[i],
275 eip197_init_firmware(priv);
277 ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);
279 /* Enable access to IPUE program memories */
280 for (pe = 0; pe < priv->config.pes; pe++)
281 writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
282 EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
284 ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);
286 if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
287 dev_dbg(priv->dev, "Firmware loaded successfully\n");
294 for (j = 0; j < i; j++)
295 release_firmware(fw[j]);
298 /* Retry with minifw path */
299 dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
300 dir = "eip197_minifw";
305 dev_dbg(priv->dev, "Firmware load failed.\n");
310 static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
312 u32 hdw, cd_size_rnd, val;
315 hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
316 hdw &= GENMASK(27, 25);
319 cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;
321 for (i = 0; i < priv->config.rings; i++) {
322 /* ring base address */
323 writel(lower_32_bits(priv->ring[i].cdr.base_dma),
324 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
325 writel(upper_32_bits(priv->ring[i].cdr.base_dma),
326 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
328 writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
329 priv->config.cd_size,
330 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
331 writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
332 (EIP197_FETCH_COUNT * priv->config.cd_offset),
333 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
335 /* Configure DMA tx control */
336 val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
337 val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
338 writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
340 /* clear any pending interrupt */
341 writel(GENMASK(5, 0),
342 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
348 static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
350 u32 hdw, rd_size_rnd, val;
353 hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
354 hdw &= GENMASK(27, 25);
357 rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;
359 for (i = 0; i < priv->config.rings; i++) {
360 /* ring base address */
361 writel(lower_32_bits(priv->ring[i].rdr.base_dma),
362 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
363 writel(upper_32_bits(priv->ring[i].rdr.base_dma),
364 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
366 writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
367 priv->config.rd_size,
368 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
370 writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
371 (EIP197_FETCH_COUNT * priv->config.rd_offset),
372 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
374 /* Configure DMA tx control */
375 val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
376 val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
377 val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
379 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
381 /* clear any pending interrupt */
382 writel(GENMASK(7, 0),
383 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
385 /* enable ring interrupt */
386 val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
387 val |= EIP197_RDR_IRQ(i);
388 writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
394 static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
399 dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
400 priv->config.pes, priv->config.rings);
402 /* Determine endianess and configure byte swap */
403 version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
404 val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
406 if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
407 val |= EIP197_MST_CTRL_BYTE_SWAP;
408 else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
409 val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
412 * For EIP197's only set maximum number of TX commands to 2^5 = 32
413 * Skip for the EIP97 as it does not have this field.
415 if (priv->version != EIP97IES_MRVL)
416 val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
418 writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
420 /* Configure wr/rd cache values */
421 writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
422 EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
423 EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
425 /* Interrupts reset */
427 /* Disable all global interrupts */
428 writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
430 /* Clear any pending interrupt */
431 writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
433 /* Processing Engine configuration */
434 for (pe = 0; pe < priv->config.pes; pe++) {
435 /* Data Fetch Engine configuration */
437 /* Reset all DFE threads */
438 writel(EIP197_DxE_THR_CTRL_RESET_PE,
439 EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
441 if (priv->version != EIP97IES_MRVL)
442 /* Reset HIA input interface arbiter (EIP197 only) */
443 writel(EIP197_HIA_RA_PE_CTRL_RESET,
444 EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
446 /* DMA transfer size to use */
447 val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
448 val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
449 EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
450 val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
451 EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
452 val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
453 val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
454 writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));
456 /* Leave the DFE threads reset state */
457 writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
459 /* Configure the processing engine thresholds */
460 writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
461 EIP197_PE_IN_xBUF_THRES_MAX(9),
462 EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
463 writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
464 EIP197_PE_IN_xBUF_THRES_MAX(7),
465 EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
467 if (priv->version != EIP97IES_MRVL)
468 /* enable HIA input interface arbiter and rings */
469 writel(EIP197_HIA_RA_PE_CTRL_EN |
470 GENMASK(priv->config.rings - 1, 0),
471 EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
473 /* Data Store Engine configuration */
475 /* Reset all DSE threads */
476 writel(EIP197_DxE_THR_CTRL_RESET_PE,
477 EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
479 /* Wait for all DSE threads to complete */
480 while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
481 GENMASK(15, 12)) != GENMASK(15, 12))
484 /* DMA transfer size to use */
485 val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
486 val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
487 EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
488 val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
489 val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
490 /* FIXME: instability issues can occur for EIP97 but disabling
491 * it impacts performance.
493 if (priv->version != EIP97IES_MRVL)
494 val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
495 writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
497 /* Leave the DSE threads reset state */
498 writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
500 /* Configure the procesing engine thresholds */
501 writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
502 EIP197_PE_OUT_DBUF_THRES_MAX(8),
503 EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
505 /* Processing Engine configuration */
507 /* Token & context configuration */
508 val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
509 EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX |
510 EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX;
511 writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
513 /* H/W capabilities selection: just enable everything */
514 writel(EIP197_FUNCTION_ALL,
515 EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
518 /* Command Descriptor Rings prepare */
519 for (i = 0; i < priv->config.rings; i++) {
520 /* Clear interrupts for this ring */
521 writel(GENMASK(31, 0),
522 EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
524 /* Disable external triggering */
525 writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
527 /* Clear the pending prepared counter */
528 writel(EIP197_xDR_PREP_CLR_COUNT,
529 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
531 /* Clear the pending processed counter */
532 writel(EIP197_xDR_PROC_CLR_COUNT,
533 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
536 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
538 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
540 writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
541 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
544 /* Result Descriptor Ring prepare */
545 for (i = 0; i < priv->config.rings; i++) {
546 /* Disable external triggering*/
547 writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
549 /* Clear the pending prepared counter */
550 writel(EIP197_xDR_PREP_CLR_COUNT,
551 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
553 /* Clear the pending processed counter */
554 writel(EIP197_xDR_PROC_CLR_COUNT,
555 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
558 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
560 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
563 writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
564 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
567 for (pe = 0; pe < priv->config.pes; pe++) {
568 /* Enable command descriptor rings */
569 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
570 EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
572 /* Enable result descriptor rings */
573 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
574 EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
577 /* Clear any HIA interrupt */
578 writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
580 if (priv->version != EIP97IES_MRVL) {
581 eip197_trc_cache_init(priv);
583 ret = eip197_load_firmwares(priv);
588 safexcel_hw_setup_cdesc_rings(priv);
589 safexcel_hw_setup_rdesc_rings(priv);
594 /* Called with ring's lock taken */
595 static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
598 int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
603 /* Configure when we want an interrupt */
604 writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
605 EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
606 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
609 void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
611 struct crypto_async_request *req, *backlog;
612 struct safexcel_context *ctx;
613 int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
615 /* If a request wasn't properly dequeued because of a lack of resources,
616 * proceeded it first,
618 req = priv->ring[ring].req;
619 backlog = priv->ring[ring].backlog;
624 spin_lock_bh(&priv->ring[ring].queue_lock);
625 backlog = crypto_get_backlog(&priv->ring[ring].queue);
626 req = crypto_dequeue_request(&priv->ring[ring].queue);
627 spin_unlock_bh(&priv->ring[ring].queue_lock);
630 priv->ring[ring].req = NULL;
631 priv->ring[ring].backlog = NULL;
636 ctx = crypto_tfm_ctx(req->tfm);
637 ret = ctx->send(req, ring, &commands, &results);
642 backlog->complete(backlog, -EINPROGRESS);
644 /* In case the send() helper did not issue any command to push
645 * to the engine because the input data was cached, continue to
646 * dequeue other requests as this is valid and not an error.
648 if (!commands && !results)
657 /* Not enough resources to handle all the requests. Bail out and save
658 * the request and the backlog for the next dequeue call (per-ring).
660 priv->ring[ring].req = req;
661 priv->ring[ring].backlog = backlog;
667 spin_lock_bh(&priv->ring[ring].lock);
669 priv->ring[ring].requests += nreq;
671 if (!priv->ring[ring].busy) {
672 safexcel_try_push_requests(priv, ring);
673 priv->ring[ring].busy = true;
676 spin_unlock_bh(&priv->ring[ring].lock);
678 /* let the RDR know we have pending descriptors */
679 writel((rdesc * priv->config.rd_offset) << 2,
680 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
682 /* let the CDR know we have pending descriptors */
683 writel((cdesc * priv->config.cd_offset) << 2,
684 EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
687 inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
688 struct safexcel_result_desc *rdesc)
690 if (likely((!rdesc->descriptor_overflow) &&
691 (!rdesc->buffer_overflow) &&
692 (!rdesc->result_data.error_code)))
695 if (rdesc->descriptor_overflow)
696 dev_err(priv->dev, "Descriptor overflow detected");
698 if (rdesc->buffer_overflow)
699 dev_err(priv->dev, "Buffer overflow detected");
701 if (rdesc->result_data.error_code & 0x4066) {
702 /* Fatal error (bits 1,2,5,6 & 14) */
704 "result descriptor error (%x)",
705 rdesc->result_data.error_code);
707 } else if (rdesc->result_data.error_code &
708 (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
710 * Give priority over authentication fails:
711 * Blocksize, length & overflow errors,
712 * something wrong with the input!
715 } else if (rdesc->result_data.error_code & BIT(9)) {
716 /* Authentication failed */
720 /* All other non-fatal errors */
724 inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
726 struct safexcel_result_desc *rdesc,
727 struct crypto_async_request *req)
729 int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
731 priv->ring[ring].rdr_req[i] = req;
734 inline struct crypto_async_request *
735 safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
737 int i = safexcel_ring_first_rdr_index(priv, ring);
739 return priv->ring[ring].rdr_req[i];
742 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
744 struct safexcel_command_desc *cdesc;
746 /* Acknowledge the command descriptors */
748 cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
751 "Could not retrieve the command descriptor\n");
754 } while (!cdesc->last_seg);
757 void safexcel_inv_complete(struct crypto_async_request *req, int error)
759 struct safexcel_inv_result *result = req->data;
761 if (error == -EINPROGRESS)
764 result->error = error;
765 complete(&result->completion);
768 int safexcel_invalidate_cache(struct crypto_async_request *async,
769 struct safexcel_crypto_priv *priv,
770 dma_addr_t ctxr_dma, int ring)
772 struct safexcel_command_desc *cdesc;
773 struct safexcel_result_desc *rdesc;
776 /* Prepare command descriptor */
777 cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
779 return PTR_ERR(cdesc);
781 cdesc->control_data.type = EIP197_TYPE_EXTENDED;
782 cdesc->control_data.options = 0;
783 cdesc->control_data.refresh = 0;
784 cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
786 /* Prepare result descriptor */
787 rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
790 ret = PTR_ERR(rdesc);
794 safexcel_rdr_req_set(priv, ring, rdesc, async);
799 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
804 static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
807 struct crypto_async_request *req;
808 struct safexcel_context *ctx;
809 int ret, i, nreq, ndesc, tot_descs, handled = 0;
810 bool should_complete;
815 nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
816 nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
817 nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
821 for (i = 0; i < nreq; i++) {
822 req = safexcel_rdr_req_get(priv, ring);
824 ctx = crypto_tfm_ctx(req->tfm);
825 ndesc = ctx->handle_result(priv, ring, req,
826 &should_complete, &ret);
828 dev_err(priv->dev, "failed to handle result (%d)\n",
833 if (should_complete) {
835 req->complete(req, ret);
845 writel(EIP197_xDR_PROC_xD_PKT(i) |
846 EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
847 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
849 /* If the number of requests overflowed the counter, try to proceed more
852 if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
856 spin_lock_bh(&priv->ring[ring].lock);
858 priv->ring[ring].requests -= handled;
859 safexcel_try_push_requests(priv, ring);
861 if (!priv->ring[ring].requests)
862 priv->ring[ring].busy = false;
864 spin_unlock_bh(&priv->ring[ring].lock);
867 static void safexcel_dequeue_work(struct work_struct *work)
869 struct safexcel_work_data *data =
870 container_of(work, struct safexcel_work_data, work);
872 safexcel_dequeue(data->priv, data->ring);
/* Per-ring cookie passed to the ring IRQ handlers. */
struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;	/* ring index; read by the IRQ handlers as irq_data->ring */
};
880 static irqreturn_t safexcel_irq_ring(int irq, void *data)
882 struct safexcel_ring_irq_data *irq_data = data;
883 struct safexcel_crypto_priv *priv = irq_data->priv;
884 int ring = irq_data->ring, rc = IRQ_NONE;
887 status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
892 if (status & EIP197_RDR_IRQ(ring)) {
893 stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
895 if (unlikely(stat & EIP197_xDR_ERR)) {
897 * Fatal error, the RDR is unusable and must be
898 * reinitialized. This should not happen under
899 * normal circumstances.
901 dev_err(priv->dev, "RDR: fatal error.\n");
902 } else if (likely(stat & EIP197_xDR_THRESH)) {
903 rc = IRQ_WAKE_THREAD;
906 /* ACK the interrupts */
908 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
911 /* ACK the interrupts */
912 writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
917 static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
919 struct safexcel_ring_irq_data *irq_data = data;
920 struct safexcel_crypto_priv *priv = irq_data->priv;
921 int ring = irq_data->ring;
923 safexcel_handle_result_descriptor(priv, ring);
925 queue_work(priv->ring[ring].workqueue,
926 &priv->ring[ring].work_data.work);
931 static int safexcel_request_ring_irq(void *pdev, int irqid,
933 irq_handler_t handler,
934 irq_handler_t threaded_handler,
935 struct safexcel_ring_irq_data *ring_irq_priv)
940 if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
941 struct pci_dev *pci_pdev = pdev;
943 dev = &pci_pdev->dev;
944 irq = pci_irq_vector(pci_pdev, irqid);
946 dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
950 } else if (IS_ENABLED(CONFIG_OF)) {
951 struct platform_device *plf_pdev = pdev;
952 char irq_name[6] = {0}; /* "ringX\0" */
954 snprintf(irq_name, 6, "ring%d", irqid);
955 dev = &plf_pdev->dev;
956 irq = platform_get_irq_byname(plf_pdev, irq_name);
959 dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
965 ret = devm_request_threaded_irq(dev, irq, handler,
966 threaded_handler, IRQF_ONESHOT,
967 dev_name(dev), ring_irq_priv);
969 dev_err(dev, "unable to request IRQ %d\n", irq);
976 static struct safexcel_alg_template *safexcel_algs[] = {
977 &safexcel_alg_ecb_des,
978 &safexcel_alg_cbc_des,
979 &safexcel_alg_ecb_des3_ede,
980 &safexcel_alg_cbc_des3_ede,
981 &safexcel_alg_ecb_aes,
982 &safexcel_alg_cbc_aes,
983 &safexcel_alg_cfb_aes,
984 &safexcel_alg_ofb_aes,
985 &safexcel_alg_ctr_aes,
988 &safexcel_alg_sha224,
989 &safexcel_alg_sha256,
990 &safexcel_alg_sha384,
991 &safexcel_alg_sha512,
992 &safexcel_alg_hmac_md5,
993 &safexcel_alg_hmac_sha1,
994 &safexcel_alg_hmac_sha224,
995 &safexcel_alg_hmac_sha256,
996 &safexcel_alg_hmac_sha384,
997 &safexcel_alg_hmac_sha512,
998 &safexcel_alg_authenc_hmac_sha1_cbc_aes,
999 &safexcel_alg_authenc_hmac_sha224_cbc_aes,
1000 &safexcel_alg_authenc_hmac_sha256_cbc_aes,
1001 &safexcel_alg_authenc_hmac_sha384_cbc_aes,
1002 &safexcel_alg_authenc_hmac_sha512_cbc_aes,
1003 &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
1004 &safexcel_alg_authenc_hmac_sha1_ctr_aes,
1005 &safexcel_alg_authenc_hmac_sha224_ctr_aes,
1006 &safexcel_alg_authenc_hmac_sha256_ctr_aes,
1007 &safexcel_alg_authenc_hmac_sha384_ctr_aes,
1008 &safexcel_alg_authenc_hmac_sha512_ctr_aes,
1009 &safexcel_alg_xts_aes,
1014 static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
1018 for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
1019 safexcel_algs[i]->priv = priv;
1021 /* Do we have all required base algorithms available? */
1022 if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
1023 safexcel_algs[i]->algo_mask)
1024 /* No, so don't register this ciphersuite */
1027 if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
1028 ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
1029 else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
1030 ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
1032 ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);
1041 for (j = 0; j < i; j++) {
1042 /* Do we have all required base algorithms available? */
1043 if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
1044 safexcel_algs[j]->algo_mask)
1045 /* No, so don't unregister this ciphersuite */
1048 if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
1049 crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
1050 else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
1051 crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
1053 crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
1059 static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
1063 for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
1064 /* Do we have all required base algorithms available? */
1065 if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
1066 safexcel_algs[i]->algo_mask)
1067 /* No, so don't unregister this ciphersuite */
1070 if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
1071 crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
1072 else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
1073 crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
1075 crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
1079 static void safexcel_configure(struct safexcel_crypto_priv *priv)
1083 val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
1085 /* Read number of PEs from the engine */
1086 if (priv->version == EIP97IES_MRVL)
1087 /* Narrow field width for EIP97 type engine */
1088 mask = EIP97_N_PES_MASK;
1090 /* Wider field width for all EIP197 type engines */
1091 mask = EIP197_N_PES_MASK;
1093 priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
1095 priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
1097 val = (val & GENMASK(27, 25)) >> 25;
1098 mask = BIT(val) - 1;
1100 priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
1101 priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
1103 priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
1104 priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
1107 static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
1109 struct safexcel_register_offsets *offsets = &priv->offsets;
1111 if (priv->version == EIP97IES_MRVL) {
1112 offsets->hia_aic = EIP97_HIA_AIC_BASE;
1113 offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
1114 offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
1115 offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
1116 offsets->hia_dfe = EIP97_HIA_DFE_BASE;
1117 offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
1118 offsets->hia_dse = EIP97_HIA_DSE_BASE;
1119 offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
1120 offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
1121 offsets->pe = EIP97_PE_BASE;
1123 offsets->hia_aic = EIP197_HIA_AIC_BASE;
1124 offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
1125 offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
1126 offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
1127 offsets->hia_dfe = EIP197_HIA_DFE_BASE;
1128 offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
1129 offsets->hia_dse = EIP197_HIA_DSE_BASE;
1130 offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
1131 offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
1132 offsets->pe = EIP197_PE_BASE;
/*
 * Generic part of the probe routine, shared by the platform (DT) and the
 * PCI front-end drivers.
 *
 * Assumes IO resources have been mapped, private data memory has been
 * allocated, clocks have been enabled and the device pointer has been
 * assigned etc. - i.e. all bus-specific setup is already done.
 *
 * NOTE(review): lines below keep their original file line numbers; some
 * structural lines (braces, "if (ret) return ret;" checks, declarations)
 * were elided by the extraction. Annotations describe visible code only.
 */
1143 static int safexcel_probe_generic(void *pdev,
1144 struct safexcel_crypto_priv *priv,
/* pdev is a platform_device or a pci_dev depending on the front-end */
1147 struct device *dev = priv->dev;
/* Managed DMA pool for the per-request context records shared with HW */
1151 priv->context_pool = dmam_pool_create("safexcel-context", dev,
1152 sizeof(struct safexcel_context_record),
1154 if (!priv->context_pool)
/* Select the EIP97 vs EIP197 register map before any register access */
1157 safexcel_init_register_offsets(priv);
1159 /* Get supported algorithms from EIP96 transform engine */
1160 priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
1161 EIP197_PE_EIP96_OPTIONS(0));
/* Non-EIP97 variants get the transform record cache flag set below */
1163 if (priv->version == EIP97IES_MRVL) {
1166 priv->flags |= EIP197_TRC_CACHE;
1170 /* Dump some debug information important during development */
/* peid is declared on an elided line; presumably 97 or 197 - confirm */
1171 dev_dbg(priv->dev, "Inside Secure EIP%d packetengine\n", peid);
1172 dev_dbg(priv->dev, "Supported algorithms: %08x\n",
1173 priv->hwconfig.algo_flags);
/* Derive ring count and descriptor sizing from the HW options */
1175 safexcel_configure(priv);
1177 if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
1179 * Request MSI vectors for global + 1 per ring -
1180 * or just 1 for older dev images
1182 struct pci_dev *pci_pdev = pdev;
/* min == max == rings + 1: all-or-nothing vector allocation */
1184 ret = pci_alloc_irq_vectors(pci_pdev,
1185 priv->config.rings + 1,
1186 priv->config.rings + 1,
1187 PCI_IRQ_MSI | PCI_IRQ_MSIX);
1189 dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
1194 /* Register the ring IRQ handlers and configure the rings */
1195 priv->ring = devm_kcalloc(dev, priv->config.rings,
1196 sizeof(*priv->ring),
1201 for (i = 0; i < priv->config.rings; i++) {
1202 char wq_name[9] = {0};
1204 struct safexcel_ring_irq_data *ring_irq;
/* Allocate and program the CDR/RDR descriptor rings for ring i */
1206 ret = safexcel_init_ring_descriptors(priv,
1208 &priv->ring[i].rdr);
1210 dev_err(dev, "Failed to initialize rings\n");
/*
 * Backlog of request pointers, one slot per RDR entry.
 * NOTE(review): sizeof(priv->ring[i].rdr_req) is the size of the
 * pointer member itself, which happens to equal the element size;
 * sizeof(*priv->ring[i].rdr_req) would state the intent better.
 */
1214 priv->ring[i].rdr_req = devm_kcalloc(dev,
1215 EIP197_DEFAULT_RING_SIZE,
1216 sizeof(priv->ring[i].rdr_req),
1218 if (!priv->ring[i].rdr_req)
/* Cookie handed to the per-ring IRQ handler (priv + ring index) */
1221 ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
1225 ring_irq->priv = priv;
/* Hook the ring interrupt; the threaded handler does completion work */
1228 irq = safexcel_request_ring_irq(pdev,
1229 EIP197_IRQ_NUMBER(i, is_pci_dev),
1232 safexcel_irq_ring_thread,
1235 dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
/* Per-ring deferred dequeue work on a dedicated single-threaded WQ */
1239 priv->ring[i].work_data.priv = priv;
1240 priv->ring[i].work_data.ring = i;
1241 INIT_WORK(&priv->ring[i].work_data.work,
1242 safexcel_dequeue_work);
/* wq_name is 9 bytes: "wq_ring" + single digit + NUL, so rings <= 10 */
1244 snprintf(wq_name, 9, "wq_ring%d", i);
1245 priv->ring[i].workqueue =
1246 create_singlethread_workqueue(wq_name);
1247 if (!priv->ring[i].workqueue)
1250 priv->ring[i].requests = 0;
1251 priv->ring[i].busy = false;
/* SW queue used to backlog requests while the HW ring is full */
1253 crypto_init_queue(&priv->ring[i].queue,
1254 EIP197_DEFAULT_RING_SIZE);
1256 spin_lock_init(&priv->ring[i].lock);
1257 spin_lock_init(&priv->ring[i].queue_lock);
/* Counter starts at 0; presumably drives ring selection - confirm */
1260 atomic_set(&priv->ring_used, 0);
/* Bring the engine up and only then expose the crypto algorithms */
1262 ret = safexcel_hw_init(priv);
1264 dev_err(dev, "HW init failed (%d)\n", ret);
1268 ret = safexcel_register_algorithms(priv);
1270 dev_err(dev, "Failed to register algorithms (%d)\n", ret);
1277 static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
1281 for (i = 0; i < priv->config.rings; i++) {
1282 /* clear any pending interrupt */
1283 writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
1284 writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
1286 /* Reset the CDR base address */
1287 writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
1288 writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
1290 /* Reset the RDR base address */
1291 writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
1292 writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
1296 #if IS_ENABLED(CONFIG_OF)
1297 /* for Device Tree platform driver */
1299 static int safexcel_probe(struct platform_device *pdev)
1301 struct device *dev = &pdev->dev;
1302 struct safexcel_crypto_priv *priv;
1305 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1310 priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
1312 platform_set_drvdata(pdev, priv);
1314 priv->base = devm_platform_ioremap_resource(pdev, 0);
1315 if (IS_ERR(priv->base)) {
1316 dev_err(dev, "failed to get resource\n");
1317 return PTR_ERR(priv->base);
1320 priv->clk = devm_clk_get(&pdev->dev, NULL);
1321 ret = PTR_ERR_OR_ZERO(priv->clk);
1322 /* The clock isn't mandatory */
1323 if (ret != -ENOENT) {
1327 ret = clk_prepare_enable(priv->clk);
1329 dev_err(dev, "unable to enable clk (%d)\n", ret);
1334 priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
1335 ret = PTR_ERR_OR_ZERO(priv->reg_clk);
1336 /* The clock isn't mandatory */
1337 if (ret != -ENOENT) {
1341 ret = clk_prepare_enable(priv->reg_clk);
1343 dev_err(dev, "unable to enable reg clk (%d)\n", ret);
1348 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
1352 /* Generic EIP97/EIP197 device probing */
1353 ret = safexcel_probe_generic(pdev, priv, 0);
1360 clk_disable_unprepare(priv->reg_clk);
1362 clk_disable_unprepare(priv->clk);
1366 static int safexcel_remove(struct platform_device *pdev)
1368 struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
1371 safexcel_unregister_algorithms(priv);
1372 safexcel_hw_reset_rings(priv);
1374 clk_disable_unprepare(priv->clk);
1376 for (i = 0; i < priv->config.rings; i++)
1377 destroy_workqueue(priv->ring[i].workqueue);
1382 static const struct of_device_id safexcel_of_match_table[] = {
1384 .compatible = "inside-secure,safexcel-eip97ies",
1385 .data = (void *)EIP97IES_MRVL,
1388 .compatible = "inside-secure,safexcel-eip197b",
1389 .data = (void *)EIP197B_MRVL,
1392 .compatible = "inside-secure,safexcel-eip197d",
1393 .data = (void *)EIP197D_MRVL,
1395 /* For backward compatibility and intended for generic use */
1397 .compatible = "inside-secure,safexcel-eip97",
1398 .data = (void *)EIP97IES_MRVL,
1401 .compatible = "inside-secure,safexcel-eip197",
1402 .data = (void *)EIP197B_MRVL,
1407 static struct platform_driver crypto_safexcel = {
1408 .probe = safexcel_probe,
1409 .remove = safexcel_remove,
1411 .name = "crypto-safexcel",
1412 .of_match_table = safexcel_of_match_table,
1417 #if IS_ENABLED(CONFIG_PCI)
1418 /* PCIE devices - i.e. Inside Secure development boards */
1420 static int safexcel_pci_probe(struct pci_dev *pdev,
1421 const struct pci_device_id *ent)
1423 struct device *dev = &pdev->dev;
1424 struct safexcel_crypto_priv *priv;
1425 void __iomem *pciebase;
1429 dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
1430 ent->vendor, ent->device, ent->subvendor,
1431 ent->subdevice, ent->driver_data);
1433 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1438 priv->version = (enum safexcel_eip_version)ent->driver_data;
1440 pci_set_drvdata(pdev, priv);
1442 /* enable the device */
1443 rc = pcim_enable_device(pdev);
1445 dev_err(dev, "Failed to enable PCI device\n");
1449 /* take ownership of PCI BAR0 */
1450 rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
1452 dev_err(dev, "Failed to map IO region for BAR0\n");
1455 priv->base = pcim_iomap_table(pdev)[0];
1457 if (priv->version == EIP197_DEVBRD) {
1458 dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");
1460 rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
1462 dev_err(dev, "Failed to map IO region for BAR4\n");
1466 pciebase = pcim_iomap_table(pdev)[2];
1467 val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
1468 if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
1469 dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
1472 /* Setup MSI identity map mapping */
1473 writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
1474 pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
1475 writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
1476 pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
1477 writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
1478 pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
1479 writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
1480 pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);
1482 /* Enable all device interrupts */
1483 writel(GENMASK(31, 0),
1484 pciebase + EIP197_XLX_USER_INT_ENB_MSK);
1486 dev_err(dev, "Unrecognised IRQ block identifier %x\n",
1491 /* HW reset FPGA dev board */
1493 writel(1, priv->base + EIP197_XLX_GPIO_BASE);
1494 wmb(); /* maintain strict ordering for accesses here */
1495 /* deassert reset */
1496 writel(0, priv->base + EIP197_XLX_GPIO_BASE);
1497 wmb(); /* maintain strict ordering for accesses here */
1500 /* enable bus mastering */
1501 pci_set_master(pdev);
1503 /* Generic EIP97/EIP197 device probing */
1504 rc = safexcel_probe_generic(pdev, priv, 1);
1508 void safexcel_pci_remove(struct pci_dev *pdev)
1510 struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
1513 safexcel_unregister_algorithms(priv);
1515 for (i = 0; i < priv->config.rings; i++)
1516 destroy_workqueue(priv->ring[i].workqueue);
1518 safexcel_hw_reset_rings(priv);
/*
 * PCI IDs served by this driver: a Xilinx FPGA based development board
 * matched by vendor/device plus subsystem IDs.
 * NOTE(review): the subvendor/subdevice arguments of PCI_DEVICE_SUB and
 * the table terminator sit on elided lines of this extraction.
 */
1521 static const struct pci_device_id safexcel_pci_ids[] = {
1523 PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
1525 /* assume EIP197B for now */
1526 .driver_data = EIP197_DEVBRD,
/* Export the ID table so udev/modprobe can autoload the module */
1531 MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);
1533 static struct pci_driver safexcel_pci_driver = {
1534 .name = "crypto-safexcel",
1535 .id_table = safexcel_pci_ids,
1536 .probe = safexcel_pci_probe,
1537 .remove = safexcel_pci_remove,
1541 static int __init safexcel_init(void)
1545 #if IS_ENABLED(CONFIG_OF)
1546 /* Register platform driver */
1547 platform_driver_register(&crypto_safexcel);
1550 #if IS_ENABLED(CONFIG_PCI)
1551 /* Register PCI driver */
1552 rc = pci_register_driver(&safexcel_pci_driver);
1558 static void __exit safexcel_exit(void)
1560 #if IS_ENABLED(CONFIG_OF)
1561 /* Unregister platform driver */
1562 platform_driver_unregister(&crypto_safexcel);
1565 #if IS_ENABLED(CONFIG_PCI)
1566 /* Unregister PCI driver if successfully registered before */
1567 pci_unregister_driver(&safexcel_pci_driver);
/* Module entry/exit hooks and module metadata. */
1571 module_init(safexcel_init);
1572 module_exit(safexcel_exit);
1574 MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
1575 MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
1576 MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
1577 MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
1578 MODULE_LICENSE("GPL v2");