// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"
static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
{
	int i;

	/*
	 * Map all interfaces/rings to register index 0
	 * so they can share contexts. Without this, the EIP197 will
	 * assume each interface/ring to be in its own memory domain
	 * i.e. have its own subset of UNIQUE memory addresses.
	 * Which would cause records with the SAME memory address to
	 * use DIFFERENT cache buffers, causing both poor cache utilization
	 * AND serious coherence/invalidation issues.
	 */
	for (i = 0; i < 4; i++)
		writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));

	/*
	 * Initialize other virtualization regs for cache
	 * These may not be in their reset state ...
	 */
	for (i = 0; i < priv->config.rings; i++) {
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
		writel(EIP197_FLUE_CONFIG_MAGIC,
		       priv->base + EIP197_FLUE_CONFIG(i));
	}
	writel(0, priv->base + EIP197_FLUE_OFFSETS);
	writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
}
static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
				     u32 addrmid, int *actbank)
{
	u32 val;
	int curbank;

	curbank = addrmid >> 16;
	if (curbank != *actbank) {
		val = readl(priv->base + EIP197_CS_RAM_CTRL);
		val = (val & ~EIP197_CS_BANKSEL_MASK) |
		      (curbank << EIP197_CS_BANKSEL_OFS);
		writel(val, priv->base + EIP197_CS_RAM_CTRL);
		*actbank = curbank;
	}
}
static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
				  int maxbanks, u32 probemask, u32 stride)
{
	u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
	int actbank;

	/*
	 * Probe the actual size of the physically attached cache data RAM
	 * using a binary subdivision algorithm down to 32 byte cache lines.
	 */
	addrhi = 1 << (16 + maxbanks);
	addrlo = 0;
	actbank = min(maxbanks - 1, 0);
	while ((addrhi - addrlo) > stride) {
		/* write marker to lowest address in top half */
		addrmid = (addrhi + addrlo) >> 1;
		marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		writel(marker,
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       (addrmid & 0xffff));

		/* write invalid markers to possible aliases */
		delta = 1 << __fls(addrmid);
		while (delta >= stride) {
			addralias = addrmid - delta;
			eip197_trc_cache_banksel(priv, addralias, &actbank);
			writel(~marker,
			       priv->base + EIP197_CLASSIFICATION_RAMS +
			       (addralias & 0xffff));
			delta >>= 1;
		}

		/* read back marker from top half */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
			    (addrmid & 0xffff));

		if ((val & probemask) == marker)
			/* read back correct, continue with top half */
			addrlo = addrmid;
		else
			/* not read back correct, continue with bottom half */
			addrhi = addrmid;
	}
	return addrhi;
}
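/*
 * Worked example for eip197_trc_cache_probe() above, with hypothetical
 * values for illustration only: with addrlo = 0 and addrhi = 0x20000,
 * a marker is written at addrmid = 0x10000 and complemented markers at
 * every potential alias addrmid - delta for delta = 0x10000, 0x8000, ...
 * down to the stride. A smaller RAM that wraps around would thus see its
 * marker overwritten; if the read-back at 0x10000 still matches, at least
 * 64KB of RAM is present and the search continues in the top half,
 * otherwise it continues in the bottom half.
 */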
static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
				   int cs_rc_max, int cs_ht_wc)
{
	int i;
	u32 htable_offset, val, offset;

	/* Clear all records in administration RAM */
	for (i = 0; i < cs_rc_max; i++) {
		offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + 4);
		/* must also initialize the address key due to ECC! */
		writel(0, priv->base + offset + 8);
		writel(0, priv->base + offset + 12);
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       htable_offset + i * sizeof(u32));
}
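/*
 * For illustration: with cs_rc_max = 4 (hypothetical), the loop above
 * links the administration records into a free chain as
 *   rec 0: prev = NULL, next = 1
 *   rec 1: prev = 0,    next = 2
 *   rec 2: prev = 1,    next = 3
 *   rec 3: prev = 2,    next = NULL
 * i.e. a doubly-linked list terminated by EIP197_RC_NULL at both ends,
 * matching the head/tail pointers later written to EIP197_TRC_FREECHAIN.
 */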
static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, dsize, asize;
	int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
	int cs_rc_abs_max, cs_ht_sz;
	int maxbanks;

	/* Setup (dummy) virtualization for cache */
	eip197_trc_cache_setupvirt(priv);

	/*
	 * Enable the record cache memory access and
	 * probe the bank select width
	 */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	maxbanks = ((val & EIP197_CS_BANKSEL_MASK) >> EIP197_CS_BANKSEL_OFS) + 1;

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset. Need data memory access here, not admin access.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed data RAM size in bytes */
	dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);

	/*
	 * Now probe the administration RAM size pretty much the same way,
	 * except that only the lower 30 bits are writable and we don't need
	 * bank selects
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	/* admin access now */
	val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed admin RAM size in admin words */
	asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;

	/* Clear any ECC errors detected while probing! */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/* Sanity check probing results */
	if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
		dev_err(priv->dev, "Record cache probing failed (%d,%d).",
			dsize, asize);
		return -ENODEV;
	}

	/*
	 * Determine optimal configuration from RAM sizes
	 * Note that we assume that the physical RAM configuration is sane
	 * Therefore, we don't do any parameter error checking here ...
	 */

	/* For now, just use a single record format covering everything */
	cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
	cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;

	/*
	 * Step #1: How many records will physically fit?
	 * Hard upper limit is 1023!
	 */
	cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
	/* Step #2: Need at least 2 words in the admin RAM per record */
	cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
	/* Step #3: Determine log2 of hash table size */
	cs_ht_sz = __fls(asize - cs_rc_max) - 2;
	/* Step #4: determine current size of hash table in dwords */
	cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
	/* Step #5: add back excess words and see if we can fit more records */
	cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));
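	/*
	 * Worked example of the five sizing steps above, using hypothetical
	 * probe results dsize = 32768 bytes and asize = 8192 admin words,
	 * and taking a record size of 64 words for illustration:
	 *   #1: cs_rc_abs_max = min((32768 >> 2) / 64, 1023)   = 128
	 *   #2: cs_rc_max     = min(128, 8192 >> 1)            = 128
	 *   #3: cs_ht_sz      = __fls(8192 - 128) - 2          = 10
	 *   #4: cs_ht_wc      = 16 << 10                       = 16384
	 *   #5: cs_rc_max     = min(128, 8192 - (16384 >> 2))  = 128
	 */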
	/* Clear the cache RAMs */
	eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
		 dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
	return 0;
}
static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
{
	int pe, i;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Configure the token FIFO's */
		writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));

		/* Clear the ICE scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		/* clear the scratchpad RAM using 32 bit writes only */
		for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
			writel(0, EIP197_PE(priv) +
				  EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));

		/* Reset the IFPP engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Reset the IPUE engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));

		/* Enable access to all IFPP program memories */
		writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
	}
}
static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
				 const struct firmware *fw)
{
	const __be32 *data = (const __be32 *)fw->data;
	int i;

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       i * sizeof(__be32));

	/* Exclude final 2 NOPs from size */
	return i - EIP197_FW_TERMINAL_NOPS;
}
/*
 * If FW is actual production firmware, then poll for its initialization
 * to complete and check if it is good for the HW, otherwise just return OK.
 */
static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
{
	int pe, pollcnt;
	u32 base, pollofs;

	if (fpp)
		pollofs = EIP197_FW_FPP_READY;
	else
		pollofs = EIP197_FW_PUE_READY;

	for (pe = 0; pe < priv->config.pes; pe++) {
		base = EIP197_PE_ICE_SCRATCH_RAM(pe);
		pollcnt = EIP197_FW_START_POLLCNT;
		while (pollcnt &&
		       (readl_relaxed(EIP197_PE(priv) + base +
			      pollofs) != 1)) {
			pollcnt--;
		}
		if (!pollcnt) {
			dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
				fpp, pe);
			return false;
		}
	}
	return true;
}
static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
				  int ipuesz, int ifppsz, int minifw)
{
	int pe;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Disable access to all program memory */
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

		/* Start IFPP microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
				EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Start IPUE microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
				EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
	}

	/* For miniFW startup, there is no initialization, so always succeed */
	if (minifw)
		return true;

	/* Wait until all the firmwares have properly started up */
	if (!poll_fw_ready(priv, 1))
		return false;
	if (!poll_fw_ready(priv, 0))
		return false;

	return true;
}
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[37], *dir = NULL;
	int i, j, ret = 0, pe;
	int ipuesz, ifppsz, minifw = 0;

	if (priv->version == EIP197D_MRVL)
		dir = "eip197d";
	else if (priv->version == EIP197B_MRVL ||
		 priv->version == EIP197_DEVBRD)
		dir = "eip197b";
	else
		return -ENODEV;

retry_fw:
	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (minifw || priv->version != EIP197B_MRVL)
				goto release_fw;

			/* Fallback to the old firmware location for the
			 * EIP197b.
			 */
			ret = firmware_request_nowarn(&fw[i], fw_name[i],
						      priv->dev);
			if (ret)
				goto release_fw;
		}
	}

	eip197_init_firmware(priv);

	ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);

	/* Enable access to IPUE program memories */
	for (pe = 0; pe < priv->config.pes; pe++)
		writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);

	if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
		dev_dbg(priv->dev, "Firmware loaded successfully\n");
		return 0;
	}

	ret = -ENODEV;

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	if (!minifw) {
		/* Retry with minifw path */
		dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
		dir = "eip197_minifw";
		minifw = 1;
		goto retry_fw;
	}

	dev_dbg(priv->dev, "Firmware load failed.\n");

	return ret;
}
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 cd_size_rnd, val;
	int i, cd_fetch_cnt;

	cd_size_rnd = (priv->config.cd_size +
		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
		      priv->hwconfig.hwdataw;
	/* determine number of CD's we can fetch into the CD FIFO as 1 block */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
		cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
				cd_size_rnd) - 1;
	}
	/*
	 * Since we're using command desc's way larger than formally specified,
	 * we need to check whether we can fit even 1 for low-end EIP196's!
	 */
	if (!cd_fetch_cnt) {
		dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
		return -ENODEV;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 14) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((cd_fetch_cnt *
			 (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}
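/*
 * Sketch of the fetch-count computation in safexcel_hw_setup_cdesc_rings()
 * above, with hypothetical hardware parameters for illustration: for
 * hwdataw = 2 (bus words of 1 << 2 = 4 dwords), a command descriptor of
 * cd_size = 10 dwords rounds up to cd_size_rnd = (10 + 3) >> 2 = 3 bus
 * words. With hwcfsize = 4 the FIFO holds 1 << 4 = 16 bus words, so an
 * EIP197 would fetch 16 / 3 = 5 descriptors per block (further capped by
 * pes * EIP197_FETCH_DEPTH), while an EIP97 would use 16 / 3 - 1 = 4.
 */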
static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 rd_size_rnd, val;
	int i, rd_fetch_cnt;

	/* determine number of RD's we can fetch into the FIFO as one block */
	rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
		      priv->hwconfig.hwdataw;
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
		rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
				rd_size_rnd) - 1;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((rd_fetch_cnt *
			 (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 val;
	int i, ret, pe, opbuflo, opbufhi;

	dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
		priv->config.pes, priv->config.rings);

	/*
	 * For EIP197's only set maximum number of TX commands to 2^5 = 32
	 * Skip for the EIP97 as it does not have this field.
	 */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->flags & EIP197_PE_ARB)
			/* Reset HIA input interface arbiter (if present) */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Leave the DFE threads reset state */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->flags & SAFEXCEL_HW_EIP197)
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		if (priv->hwconfig.hwnumpes > 4) {
			opbuflo = 9;
			opbufhi = 10;
		} else {
			opbuflo = 7;
			opbufhi = 8;
		}
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for EIP97 but disabling
		 * it impacts performance.
		 */
		if (priv->flags & SAFEXCEL_HW_EIP197)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Leave the DSE threads reset state */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* Token & context configuration */
		val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
		      EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
		      EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));

		/* H/W capabilities selection: just enable everything */
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->flags & EIP197_SIMPLE_TRC) {
		writel(EIP197_STRC_CONFIG_INIT |
		       EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
		       EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
		       priv->base + EIP197_STRC_CONFIG);
		writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
		       EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
	} else if (priv->flags & SAFEXCEL_HW_EIP197) {
		ret = eip197_trc_cache_init(priv);
		if (ret)
			return ret;
	}

	if (priv->flags & EIP197_ICE) {
		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	return safexcel_hw_setup_cdesc_rings(priv) ?:
	       safexcel_hw_setup_rdesc_rings(priv) ?:
	       0;
}
/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset),
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
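/*
 * Note on the final two writes in safexcel_dequeue() above: the prepared
 * counter registers are advanced in bytes, hence the multiplication by the
 * per-descriptor offset. E.g. (hypothetical numbers, for illustration) with
 * rdesc = 3 results prepared and rd_offset = 64 bytes, the RDR prepared
 * count is advanced by 192 bytes.
 */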
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       void *rdp)
{
	struct safexcel_result_desc *rdesc = rdp;
	struct result_data_desc *result_data = rdp + priv->config.res_offset;

	if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
		   ((!rdesc->descriptor_overflow) &&
		    (!rdesc->buffer_overflow) &&
		    (!result_data->error_code))))
		return 0;

	if (rdesc->descriptor_overflow)
		dev_err(priv->dev, "Descriptor overflow detected");

	if (rdesc->buffer_overflow)
		dev_err(priv->dev, "Buffer overflow detected");

	if (result_data->error_code & 0x4066) {
		/* Fatal error (bits 1, 2, 5, 6 & 14) */
		dev_err(priv->dev,
			"result descriptor error (%x)",
			result_data->error_code);
		return -EIO;
	} else if (result_data->error_code &
		   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
		/*
		 * Give priority over authentication fails:
		 * blocksize, length & overflow errors,
		 * something wrong with the input!
		 */
		return -EINVAL;
	} else if (result_data->error_code & BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}
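/*
 * The fatal-error mask tested in safexcel_rdesc_check_errors() above is
 * just the named bits spelled out:
 *   0x4066 == BIT(14) | BIT(6) | BIT(5) | BIT(2) | BIT(1)
 */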
inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}
void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)\n",
				ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i)
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       (tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

	/* If the number of requests overflowed the counter, try to proceed more
	 * requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}
static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
		container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};
static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.\n");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}
static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}
static int safexcel_request_ring_irq(void *pdev, int irqid,
				     int is_pci_dev,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq;
	struct device *dev;

	if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
		struct pci_dev *pci_pdev = pdev;

		dev = &pci_pdev->dev;
		irq = pci_irq_vector(pci_pdev, irqid);
		if (irq < 0) {
			dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
				irqid, irq);
			return irq;
		}
	} else if (IS_ENABLED(CONFIG_OF)) {
		struct platform_device *plf_pdev = pdev;
		char irq_name[6] = {0}; /* "ringX\0" */

		snprintf(irq_name, 6, "ring%d", irqid);
		dev = &plf_pdev->dev;
		irq = platform_get_irq_byname(plf_pdev, irq_name);
		if (irq < 0) {
			dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
				irq_name, irq);
			return irq;
		}
	} else {
		return -ENXIO;
	}

	ret = devm_request_threaded_irq(dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(dev), ring_irq_priv);
	if (ret) {
		dev_err(dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}
static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_cfb_aes,
	&safexcel_alg_ofb_aes,
	&safexcel_alg_ctr_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
	&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha1_ctr_aes,
	&safexcel_alg_authenc_hmac_sha224_ctr_aes,
	&safexcel_alg_authenc_hmac_sha256_ctr_aes,
	&safexcel_alg_authenc_hmac_sha384_ctr_aes,
	&safexcel_alg_authenc_hmac_sha512_ctr_aes,
	&safexcel_alg_xts_aes,
	&safexcel_alg_gcm,
	&safexcel_alg_ccm,
	&safexcel_alg_crc32,
	&safexcel_alg_cbcmac,
	&safexcel_alg_xcbcmac,
	&safexcel_alg_cmac,
	&safexcel_alg_chacha20,
	&safexcel_alg_chachapoly,
	&safexcel_alg_chachapoly_esp,
	&safexcel_alg_sm3,
	&safexcel_alg_hmac_sm3,
	&safexcel_alg_ecb_sm4,
	&safexcel_alg_cbc_sm4,
	&safexcel_alg_ofb_sm4,
	&safexcel_alg_cfb_sm4,
	&safexcel_alg_ctr_sm4,
	&safexcel_alg_authenc_hmac_sha1_cbc_sm4,
	&safexcel_alg_authenc_hmac_sm3_cbc_sm4,
	&safexcel_alg_authenc_hmac_sha1_ctr_sm4,
	&safexcel_alg_authenc_hmac_sm3_ctr_sm4,
	&safexcel_alg_sha3_224,
	&safexcel_alg_sha3_256,
	&safexcel_alg_sha3_384,
	&safexcel_alg_sha3_512,
	&safexcel_alg_hmac_sha3_224,
	&safexcel_alg_hmac_sha3_256,
	&safexcel_alg_hmac_sha3_384,
	&safexcel_alg_hmac_sha3_512,
	&safexcel_alg_authenc_hmac_sha1_cbc_des,
	&safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha256_cbc_des,
	&safexcel_alg_authenc_hmac_sha224_cbc_des,
	&safexcel_alg_authenc_hmac_sha512_cbc_des,
	&safexcel_alg_authenc_hmac_sha384_cbc_des,
	&safexcel_alg_rfc4106_gcm,
	&safexcel_alg_rfc4543_gcm,
	&safexcel_alg_rfc4309_ccm,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't register this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[j]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}
static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 mask = BIT(priv->hwconfig.hwdataw) - 1;

	priv->config.pes = priv->hwconfig.hwnumpes;
	priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
	/* Cannot currently support more rings than we have ring AICs! */
	priv->config.rings = min_t(u32, priv->config.rings,
				   priv->hwconfig.hwnumraic);

	priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	/* res token is behind the descr, but ofs must be rounded to bus width */
	priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
	/* now the size of the descr is this 1st part plus the result struct */
	priv->config.rd_size = priv->config.res_offset +
			       EIP197_RD64_RESULT_SIZE;
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;

	/* convert dwords to bytes */
	priv->config.cd_offset *= sizeof(u32);
	priv->config.rd_offset *= sizeof(u32);
	priv->config.res_offset *= sizeof(u32);
}
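/*
 * The (x + mask) & ~mask expressions above round x up to a whole number
 * of bus words: with hwdataw = 2 (hypothetical), mask = 3, so e.g. a
 * 10-dword descriptor is placed at a 12-dword pitch. The final
 * multiplications then convert these dword counts to byte offsets.
 */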
static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		offsets->hia_aic = EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
		offsets->pe = EIP197_PE_BASE;
		offsets->global = EIP197_GLOBAL_BASE;
	} else {
		offsets->hia_aic = EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
		offsets->pe = EIP97_PE_BASE;
		offsets->global = EIP97_GLOBAL_BASE;
	}
}
/*
 * Generic part of probe routine, shared by platform and PCI driver
 *
 * Assumes IO resources have been mapped, private data mem has been allocated,
 * clocks have been enabled, device pointer has been assigned etc.
 */
static int safexcel_probe_generic(void *pdev,
				  struct safexcel_crypto_priv *priv,
				  int is_pci_dev)
{
	struct device *dev = priv->dev;
	u32 peid, version, mask, val, hiaopt, hwopt, peopt;
	int i, ret, hwctg;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool)
		return -ENOMEM;

	/*
	 * First try the EIP97 HIA version regs
	 * For the EIP197, this is guaranteed to NOT return any of the test
	 * values
	 */
	version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);

	mask = 0; /* do not swap */
	if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
		priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
	} else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
		/* read back byte-swapped, so complement byte swap bits */
		mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
		priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
	} else {
		/* So it wasn't an EIP97 ... maybe it's an EIP197? */
		version = readl(priv->base + EIP197_HIA_AIC_BASE +
				EIP197_HIA_VERSION);
		if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
			priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
			priv->flags |= SAFEXCEL_HW_EIP197;
		} else if (EIP197_REG_HI16(version) ==
			   EIP197_HIA_VERSION_BE) {
			/* read back byte-swapped, so complement swap bits */
			mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
			priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
			priv->flags |= SAFEXCEL_HW_EIP197;
		} else {
			return -ENODEV;
		}
	}

	/* Now initialize the reg offsets based on the probing info so far */
	safexcel_init_register_offsets(priv);

	/*
	 * If the version was read byte-swapped, we need to flip the device
	 * swapping. Keep in mind here, though, that what we write will also be
	 * byte-swapped ...
	 */
	if (mask) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val = val ^ (mask >> 24); /* toggle byte swap bits */
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/*
	 * We're not done probing yet! We may fall through to here if no HIA
	 * was found at all. So, with the endianness presumably correct now and
	 * the offsets setup, *really* probe for the EIP97/EIP197.
	 */
	version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
	if (((priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
	     (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
	    ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
		/*
		 * We did not find the device that matched our initial probing
		 * (or our initial probing failed). Report appropriate error.
		 */
		dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
			version);
		return -ENODEV;
	}

	priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
	hwctg = version >> 28;
	peid = version & 255;

	/* Detect EIP206 processing pipe */
	version = readl(EIP197_PE(priv) + EIP197_PE_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
		dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.ppver = EIP197_VERSION_MASK(version);

	/* Detect EIP96 packet engine and version */
	version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
		dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.pever = EIP197_VERSION_MASK(version);

	hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
	hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197 */
		peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));

		priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					 EIP197_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
					   EIP197_CFSIZE_MASK) +
					  EIP197_CFSIZE_ADJUST;
		priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
					   EIP197_RFSIZE_MASK) +
					  EIP197_RFSIZE_ADJUST;
		priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
					  EIP197_N_PES_MASK;
		priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
					    EIP197_N_RINGS_MASK;
		if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
			priv->flags |= EIP197_PE_ARB;
		if (EIP206_OPT_ICE_TYPE(peopt) == 1)
			priv->flags |= EIP197_ICE;
		/* If not a full TRC, then assume simple TRC */
		if (!(hwopt & EIP197_OPT_HAS_TRC))
			priv->flags |= EIP197_SIMPLE_TRC;
		/* EIP197 always has SOME form of TRC */
		priv->flags |= EIP197_TRC_CACHE;
	} else {
		/* EIP97 */
		priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					 EIP97_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
					  EIP97_CFSIZE_MASK;
		priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
					  EIP97_RFSIZE_MASK;
		priv->hwconfig.hwnumpes = 1; /* by definition */
		priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
					    EIP197_N_RINGS_MASK;
	}

	/* Scan for ring AIC's */
	for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
		version = readl(EIP197_HIA_AIC_R(priv) +
				EIP197_HIA_AIC_R_VERSION(i));
		if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
			break;
	}
	priv->hwconfig.hwnumraic = i;
	/* Low-end EIP196 may not have any ring AIC's ... */
	if (!priv->hwconfig.hwnumraic) {
		dev_err(priv->dev, "No ring interrupt controller present!\n");
		return -ENODEV;
	}

	/* Get supported algorithms from EIP96 transform engine */
	priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
					  EIP197_PE_EIP96_OPTIONS(0));

	/* Print single info line describing what we just detected */
	dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x,alg:%08x\n",
		 peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
		 priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
		 priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
		 priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
		 priv->hwconfig.ppver, priv->hwconfig.pever,
		 priv->hwconfig.algo_flags);

	safexcel_configure(priv);

	if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
		/*
		 * Request MSI vectors for global + 1 per ring -
		 * or just 1 for older dev images
		 */
		struct pci_dev *pci_pdev = pdev;

		ret = pci_alloc_irq_vectors(pci_pdev,
					    priv->config.rings + 1,
					    priv->config.rings + 1,
					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (ret < 0) {
			dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
			return ret;
		}
	}

	/* Register the ring IRQ handlers and configure the rings */
	priv->ring = devm_kcalloc(dev, priv->config.rings,
				  sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	for (i = 0; i < priv->config.rings; i++) {
		char wq_name[9] = {0};
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret) {
			dev_err(dev, "Failed to initialize rings\n");
			return ret;
		}

		priv->ring[i].rdr_req = devm_kcalloc(dev,
						     EIP197_DEFAULT_RING_SIZE,
						     sizeof(priv->ring[i].rdr_req),
						     GFP_KERNEL);
		if (!priv->ring[i].rdr_req)
			return -ENOMEM;

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq)
			return -ENOMEM;

		ring_irq->priv = priv;
		ring_irq->ring = i;

		irq = safexcel_request_ring_irq(pdev,
						EIP197_IRQ_NUMBER(i, is_pci_dev),
						is_pci_dev,
						safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
			return irq;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work,
			  safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue =
			create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue)
			return -ENOMEM;

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "HW init failed (%d)\n", ret);
		return ret;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		return ret;
	}

	return 0;
}
static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}
/* for Device Tree platform driver */

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	platform_set_drvdata(pdev, priv);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	/* Generic EIP97/EIP197 device probing */
	ret = safexcel_probe_generic(pdev, priv, 0);
	if (ret)
		goto err_reg_clk;

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}
static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->reg_clk);
	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}
static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D_MRVL,
	},
	/* For backward compatibility and intended for generic use */
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B_MRVL,
	},
	{},
};
static struct platform_driver crypto_safexcel = {
	.probe		= safexcel_probe,
	.remove		= safexcel_remove,
	.driver		= {
		.name	= "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
/* PCIE devices - i.e. Inside Secure development boards */

static int safexcel_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	void __iomem *pciebase;
	int rc;
	u32 val;

	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
		ent->vendor, ent->device, ent->subvendor,
		ent->subdevice, ent->driver_data);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)ent->driver_data;

	pci_set_drvdata(pdev, priv);

	/* enable the device */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(dev, "Failed to enable PCI device\n");
		return rc;
	}

	/* take ownership of PCI BAR0 */
	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
	if (rc) {
		dev_err(dev, "Failed to map IO region for BAR0\n");
		return rc;
	}
	priv->base = pcim_iomap_table(pdev)[0];

	if (priv->version == EIP197_DEVBRD) {
		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");

		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
		if (rc) {
			dev_err(dev, "Failed to map IO region for BAR4\n");
			return rc;
		}

		pciebase = pcim_iomap_table(pdev)[2];
		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
				(val & 0xff));

			/* Setup MSI identity map mapping */
			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);

			/* Enable all device interrupts */
			writel(GENMASK(31, 0),
			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
		} else {
			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
				val);
			return -ENODEV;
		}

		/* HW reset FPGA dev board */
		/* assert reset */
		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
		/* deassert reset */
		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
	}

	/* enable bus mastering */
	pci_set_master(pdev);

	/* Generic EIP97/EIP197 device probing */
	rc = safexcel_probe_generic(pdev, priv, 1);
	return rc;
}
static void safexcel_pci_remove(struct pci_dev *pdev)
{
	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	safexcel_hw_reset_rings(priv);
}
static const struct pci_device_id safexcel_pci_ids[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
			       0x16ae, 0xc522),
		.driver_data = EIP197_DEVBRD,
	},
	{},
};

MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);
static struct pci_driver safexcel_pci_driver = {
	.name		= "crypto-safexcel",
	.id_table	= safexcel_pci_ids,
	.probe		= safexcel_pci_probe,
	.remove		= safexcel_pci_remove,
};
static int __init safexcel_init(void)
{
	int ret;

	/* Register PCI driver */
	ret = pci_register_driver(&safexcel_pci_driver);

	/* Register platform driver */
	if (IS_ENABLED(CONFIG_OF) && !ret) {
		ret = platform_driver_register(&crypto_safexcel);
		if (ret)
			pci_unregister_driver(&safexcel_pci_driver);
	}

	return ret;
}
static void __exit safexcel_exit(void)
{
	/* Unregister platform driver */
	if (IS_ENABLED(CONFIG_OF))
		platform_driver_unregister(&crypto_safexcel);

	/* Unregister PCI driver if successfully registered before */
	pci_unregister_driver(&safexcel_pci_driver);
}
module_init(safexcel_init);
module_exit(safexcel_exit);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");