crypto: inside-secure - add support for PCI based FPGA development board
drivers/crypto/inside-secure/safexcel.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2017 Marvell
4  *
5  * Antoine Tenart <antoine.tenart@free-electrons.com>
6  */
7
8 #include <linux/clk.h>
9 #include <linux/device.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/firmware.h>
13 #include <linux/interrupt.h>
14 #include <linux/module.h>
15 #include <linux/of_platform.h>
16 #include <linux/of_irq.h>
17 #include <linux/pci.h>
18 #include <linux/platform_device.h>
19 #include <linux/workqueue.h>
20
21 #include <crypto/internal/aead.h>
22 #include <crypto/internal/hash.h>
23 #include <crypto/internal/skcipher.h>
24
25 #include "safexcel.h"
26
27 static u32 max_rings = EIP197_MAX_RINGS;
28 module_param(max_rings, uint, 0644);
29 MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
30
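/*
 * Initialize the EIP197 transform record cache: clear all cache records and
 * hash table entries, rebuild the record free chain and program the cache
 * geometry for the engine variant in use.
 */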
31 static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
32 {
33         u32 val, htable_offset;
34         int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
35
36         if (priv->version == EIP197D_MRVL) {
37                 cs_rc_max = EIP197D_CS_RC_MAX;
38                 cs_ht_wc = EIP197D_CS_HT_WC;
39                 cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
40                 cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
41         } else {
42                 /* Default to minimum "safe" settings */
43                 cs_rc_max = EIP197B_CS_RC_MAX;
44                 cs_ht_wc = EIP197B_CS_HT_WC;
45                 cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
46                 cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
47         }
48
49         /* Enable the record cache memory access */
50         val = readl(priv->base + EIP197_CS_RAM_CTRL);
51         val &= ~EIP197_TRC_ENABLE_MASK;
52         val |= EIP197_TRC_ENABLE_0;
53         writel(val, priv->base + EIP197_CS_RAM_CTRL);
54
55         /* Clear all ECC errors */
56         writel(0, priv->base + EIP197_TRC_ECCCTRL);
57
58         /*
59          * Make sure the cache memory is accessible by taking record cache into
60          * reset.
61          */
62         val = readl(priv->base + EIP197_TRC_PARAMS);
63         val |= EIP197_TRC_PARAMS_SW_RESET;
64         val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
65         writel(val, priv->base + EIP197_TRC_PARAMS);
66
67         /* Clear all records */
68         for (i = 0; i < cs_rc_max; i++) {
69                 u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
70
71                 writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
72                        EIP197_CS_RC_PREV(EIP197_RC_NULL),
73                        priv->base + offset);
74
75                 val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
76                 if (i == 0)
77                         val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
78                 else if (i == cs_rc_max - 1)
79                         val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
80                 writel(val, priv->base + offset + sizeof(u32));
81         }
82
83         /* Clear the hash table entries */
84         htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
85         for (i = 0; i < cs_ht_wc; i++)
86                 writel(GENMASK(29, 0),
87                        priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));
88
89         /* Disable the record cache memory access */
90         val = readl(priv->base + EIP197_CS_RAM_CTRL);
91         val &= ~EIP197_TRC_ENABLE_MASK;
92         writel(val, priv->base + EIP197_CS_RAM_CTRL);
93
94         /* Write head and tail pointers of the record free chain */
95         val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
96               EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
97         writel(val, priv->base + EIP197_TRC_FREECHAIN);
98
99         /* Configure the record cache #1 */
100         val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
101               EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
102         writel(val, priv->base + EIP197_TRC_PARAMS2);
103
104         /* Configure the record cache #2 */
105         val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
106               EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
107               EIP197_TRC_PARAMS_HTABLE_SZ(2);
108         writel(val, priv->base + EIP197_TRC_PARAMS);
109 }
110
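/*
 * Download a single firmware image into the program RAM of one internal
 * classification engine, holding the engine in reset while its program
 * memory is written.
 */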
111 static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
112                                   const struct firmware *fw, int pe, u32 ctrl,
113                                   u32 prog_en)
114 {
115         const u32 *data = (const u32 *)fw->data;
116         u32 val;
117         int i;
118
119         /* Reset the engine to make its program memory accessible */
120         writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
121                EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
122                EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
123                EIP197_PE(priv) + ctrl);
124
125         /* Enable access to the program memory */
126         writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
127
128         /* Write the firmware */
129         for (i = 0; i < fw->size / sizeof(u32); i++)
130                 writel(be32_to_cpu(data[i]),
131                        priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
132
133         /* Disable access to the program memory */
134         writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
135
136         /* Release engine from reset */
137         val = readl(EIP197_PE(priv) + ctrl);
138         val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
139         writel(val, EIP197_PE(priv) + ctrl);
140 }
141
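/*
 * Request the IFPP and IPUE firmware images and download them to every
 * processing engine, clearing the ICE scratchpad RAM beforehand.
 */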
142 static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
143 {
144         const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
145         const struct firmware *fw[FW_NB];
146         char fw_path[31], *dir = NULL;
147         int i, j, ret = 0, pe;
148         u32 val;
149
150         if (priv->version == EIP197D_MRVL)
151                 dir = "eip197d";
152         else if (priv->version == EIP197B_MRVL ||
153                  priv->version == EIP197_DEVBRD)
154                 dir = "eip197b";
155         else
156                 return -ENODEV;
157
158         for (i = 0; i < FW_NB; i++) {
159                 snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]);
160                 ret = request_firmware(&fw[i], fw_path, priv->dev);
161                 if (ret) {
162                         if (priv->version != EIP197B_MRVL)
163                                 goto release_fw;
164
165                         /* Fall back to the old firmware location for the
166                          * EIP197b.
167                          */
168                         ret = request_firmware(&fw[i], fw_name[i], priv->dev);
169                         if (ret) {
170                                 dev_err(priv->dev,
171                                         "Failed to request firmware %s (%d)\n",
172                                         fw_name[i], ret);
173                                 goto release_fw;
174                         }
175                 }
176         }
177
178         for (pe = 0; pe < priv->config.pes; pe++) {
179                 /* Clear the scratchpad memory */
180                 val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
181                 val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
182                        EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
183                        EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
184                        EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
185                 writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
186
187                 memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
188                           EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
189
190                 eip197_write_firmware(priv, fw[FW_IFPP], pe,
191                                       EIP197_PE_ICE_FPP_CTRL(pe),
192                                       EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
193
194                 eip197_write_firmware(priv, fw[FW_IPUE], pe,
195                                       EIP197_PE_ICE_PUE_CTRL(pe),
196                                       EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
197         }
198
199 release_fw:
200         for (j = 0; j < i; j++)
201                 release_firmware(fw[j]);
202
203         return ret;
204 }
205
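/*
 * Program the base address, descriptor size, fetch configuration and DMA
 * settings of every command descriptor ring.
 */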
206 static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
207 {
208         u32 hdw, cd_size_rnd, val;
209         int i;
210
211         hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
212         hdw &= GENMASK(27, 25);
213         hdw >>= 25;
214
215         cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;
216
217         for (i = 0; i < priv->config.rings; i++) {
218                 /* ring base address */
219                 writel(lower_32_bits(priv->ring[i].cdr.base_dma),
220                        EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
221                 writel(upper_32_bits(priv->ring[i].cdr.base_dma),
222                        EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
223
224                 writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
225                        priv->config.cd_size,
226                        EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
227                 writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
228                        (EIP197_FETCH_COUNT * priv->config.cd_offset),
229                        EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
230
231                 /* Configure DMA tx control */
232                 val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
233                 val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
234                 writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
235
236                 /* clear any pending interrupt */
237                 writel(GENMASK(5, 0),
238                        EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
239         }
240
241         return 0;
242 }
243
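/*
 * Program the base address, descriptor size, fetch configuration and DMA
 * settings of every result descriptor ring, and enable the per-ring
 * interrupts.
 */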
244 static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
245 {
246         u32 hdw, rd_size_rnd, val;
247         int i;
248
249         hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
250         hdw &= GENMASK(27, 25);
251         hdw >>= 25;
252
253         rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;
254
255         for (i = 0; i < priv->config.rings; i++) {
256                 /* ring base address */
257                 writel(lower_32_bits(priv->ring[i].rdr.base_dma),
258                        EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
259                 writel(upper_32_bits(priv->ring[i].rdr.base_dma),
260                        EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
261
262                 writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
263                        priv->config.rd_size,
264                        EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
265
266                 writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
267                        (EIP197_FETCH_COUNT * priv->config.rd_offset),
268                        EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
269
270                 /* Configure DMA tx control */
271                 val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
272                 val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
273                 val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
274                 writel(val,
275                        EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
276
277                 /* clear any pending interrupt */
278                 writel(GENMASK(7, 0),
279                        EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
280
281                 /* enable ring interrupt */
282                 val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
283                 val |= EIP197_RDR_IRQ(i);
284                 writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
285         }
286
287         return 0;
288 }
289
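/*
 * Bring the whole engine into an operational state: host interface, data
 * fetch/store engines, processing engine(s), record cache, firmware and
 * descriptor rings.
 */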
290 static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
291 {
292         u32 version, val;
293         int i, ret, pe;
294
295         dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
296                 priv->config.pes, priv->config.rings);
297
298         /* Determine endianness and configure byte swap */
299         version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
300         val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
301
302         if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
303                 val |= EIP197_MST_CTRL_BYTE_SWAP;
304         else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
305                 val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
306
307         /*
308          * For EIP197s only, set the maximum number of TX commands to 2^5 = 32.
309          * Skip this for the EIP97 as it does not have this field.
310          */
311         if (priv->version != EIP97IES_MRVL)
312                 val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
313
314         writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
315
316         /* Configure wr/rd cache values */
317         writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
318                EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
319                EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
320
321         /* Interrupts reset */
322
323         /* Disable all global interrupts */
324         writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
325
326         /* Clear any pending interrupt */
327         writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
328
329         /* Processing Engine configuration */
330         for (pe = 0; pe < priv->config.pes; pe++) {
331                 /* Data Fetch Engine configuration */
332
333                 /* Reset all DFE threads */
334                 writel(EIP197_DxE_THR_CTRL_RESET_PE,
335                        EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
336
337                 if (priv->version != EIP97IES_MRVL)
338                         /* Reset HIA input interface arbiter (EIP197 only) */
339                         writel(EIP197_HIA_RA_PE_CTRL_RESET,
340                                EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
341
342                 /* DMA transfer size to use */
343                 val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
344                 val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
345                        EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
346                 val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
347                        EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
348                 val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
349                 val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
350                 writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));
351
352                 /* Take the DFE threads out of the reset state */
353                 writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
354
355                 /* Configure the processing engine thresholds */
356                 writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
357                        EIP197_PE_IN_xBUF_THRES_MAX(9),
358                        EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
359                 writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
360                        EIP197_PE_IN_xBUF_THRES_MAX(7),
361                        EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
362
363                 if (priv->version != EIP97IES_MRVL)
364                         /* enable HIA input interface arbiter and rings */
365                         writel(EIP197_HIA_RA_PE_CTRL_EN |
366                                GENMASK(priv->config.rings - 1, 0),
367                                EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
368
369                 /* Data Store Engine configuration */
370
371                 /* Reset all DSE threads */
372                 writel(EIP197_DxE_THR_CTRL_RESET_PE,
373                        EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
374
375                 /* Wait for all DSE threads to complete */
376                 while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
377                         GENMASK(15, 12)) != GENMASK(15, 12))
378                         ;
379
380                 /* DMA transfer size to use */
381                 val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
382                 val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
383                        EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
384                 val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
385                 val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
386                 /* FIXME: instability issues can occur for EIP97 but disabling
387                  * it impacts performance.
388                  */
389                 if (priv->version != EIP97IES_MRVL)
390                         val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
391                 writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
392
393                 /* Leave the DSE threads reset state */
394                 writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
395
396                 /* Configure the processing engine thresholds */
397                 writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
398                        EIP197_PE_OUT_DBUF_THRES_MAX(8),
399                        EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
400
401                 /* Processing Engine configuration */
402
403                 /* Token & context configuration */
404                 val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
405                       EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX |
406                       EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX;
407                 writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
408
409                 /* H/W capabilities selection: just enable everything */
410                 writel(EIP197_FUNCTION_ALL,
411                        EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
412         }
413
414         /* Command Descriptor Rings prepare */
415         for (i = 0; i < priv->config.rings; i++) {
416                 /* Clear interrupts for this ring */
417                 writel(GENMASK(31, 0),
418                        EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
419
420                 /* Disable external triggering */
421                 writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
422
423                 /* Clear the pending prepared counter */
424                 writel(EIP197_xDR_PREP_CLR_COUNT,
425                        EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
426
427                 /* Clear the pending processed counter */
428                 writel(EIP197_xDR_PROC_CLR_COUNT,
429                        EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
430
431                 writel(0,
432                        EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
433                 writel(0,
434                        EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
435
436                 writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
437                        EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
438         }
439
440         /* Result Descriptor Ring prepare */
441         for (i = 0; i < priv->config.rings; i++) {
442                 /* Disable external triggering */
443                 writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
444
445                 /* Clear the pending prepared counter */
446                 writel(EIP197_xDR_PREP_CLR_COUNT,
447                        EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
448
449                 /* Clear the pending processed counter */
450                 writel(EIP197_xDR_PROC_CLR_COUNT,
451                        EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
452
453                 writel(0,
454                        EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
455                 writel(0,
456                        EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
457
458                 /* Ring size */
459                 writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
460                        EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
461         }
462
463         for (pe = 0; pe < priv->config.pes; pe++) {
464                 /* Enable command descriptor rings */
465                 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
466                        EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
467
468                 /* Enable result descriptor rings */
469                 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
470                        EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
471         }
472
473         /* Clear any HIA interrupt */
474         writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
475
476         if (priv->version != EIP97IES_MRVL) {
477                 eip197_trc_cache_init(priv);
478
479                 ret = eip197_load_firmwares(priv);
480                 if (ret)
481                         return ret;
482         }
483
484         safexcel_hw_setup_cdesc_rings(priv);
485         safexcel_hw_setup_rdesc_rings(priv);
486
487         return 0;
488 }
489
490 /* Called with ring's lock taken */
491 static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
492                                        int ring)
493 {
494         int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
495
496         if (!coal)
497                 return;
498
499         /* Configure when we want an interrupt */
500         writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
501                EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
502                EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
503 }
504
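/*
 * Pull requests off the per-ring crypto queue and turn them into command and
 * result descriptors until the queue is empty or the ring runs out of
 * resources, then tell the engine about the newly prepared descriptors.
 */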
505 void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
506 {
507         struct crypto_async_request *req, *backlog;
508         struct safexcel_context *ctx;
509         int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
510
511         /* If a request wasn't properly dequeued because of a lack of resources,
512          * process it first.
513          */
514         req = priv->ring[ring].req;
515         backlog = priv->ring[ring].backlog;
516         if (req)
517                 goto handle_req;
518
519         while (true) {
520                 spin_lock_bh(&priv->ring[ring].queue_lock);
521                 backlog = crypto_get_backlog(&priv->ring[ring].queue);
522                 req = crypto_dequeue_request(&priv->ring[ring].queue);
523                 spin_unlock_bh(&priv->ring[ring].queue_lock);
524
525                 if (!req) {
526                         priv->ring[ring].req = NULL;
527                         priv->ring[ring].backlog = NULL;
528                         goto finalize;
529                 }
530
531 handle_req:
532                 ctx = crypto_tfm_ctx(req->tfm);
533                 ret = ctx->send(req, ring, &commands, &results);
534                 if (ret)
535                         goto request_failed;
536
537                 if (backlog)
538                         backlog->complete(backlog, -EINPROGRESS);
539
540                 /* In case the send() helper did not issue any command to push
541                  * to the engine because the input data was cached, continue to
542                  * dequeue other requests as this is valid and not an error.
543                  */
544                 if (!commands && !results)
545                         continue;
546
547                 cdesc += commands;
548                 rdesc += results;
549                 nreq++;
550         }
551
552 request_failed:
553         /* Not enough resources to handle all the requests. Bail out and save
554          * the request and the backlog for the next dequeue call (per-ring).
555          */
556         priv->ring[ring].req = req;
557         priv->ring[ring].backlog = backlog;
558
559 finalize:
560         if (!nreq)
561                 return;
562
563         spin_lock_bh(&priv->ring[ring].lock);
564
565         priv->ring[ring].requests += nreq;
566
567         if (!priv->ring[ring].busy) {
568                 safexcel_try_push_requests(priv, ring);
569                 priv->ring[ring].busy = true;
570         }
571
572         spin_unlock_bh(&priv->ring[ring].lock);
573
574         /* let the RDR know we have pending descriptors */
575         writel((rdesc * priv->config.rd_offset) << 2,
576                EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
577
578         /* let the CDR know we have pending descriptors */
579         writel((cdesc * priv->config.cd_offset) << 2,
580                EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
581 }
582
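/* Decode the error bits of a result descriptor into an errno value. */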
583 inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
584                                        struct safexcel_result_desc *rdesc)
585 {
586         if (likely((!rdesc->descriptor_overflow) &&
587                    (!rdesc->buffer_overflow) &&
588                    (!rdesc->result_data.error_code)))
589                 return 0;
590
591         if (rdesc->descriptor_overflow)
592                 dev_err(priv->dev, "Descriptor overflow detected");
593
594         if (rdesc->buffer_overflow)
595                 dev_err(priv->dev, "Buffer overflow detected");
596
597         if (rdesc->result_data.error_code & 0x4066) {
598                 /* Fatal error (bits 1,2,5,6 & 14) */
599                 dev_err(priv->dev,
600                         "result descriptor error (%x)",
601                         rdesc->result_data.error_code);
602                 return -EIO;
603         } else if (rdesc->result_data.error_code &
604                    (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
605                 /*
606                  * Give these errors priority over authentication failures:
607                  * block size, length & overflow errors indicate
608                  * something is wrong with the input!
609                  */
610                 return -EINVAL;
611         } else if (rdesc->result_data.error_code & BIT(9)) {
612                 /* Authentication failed */
613                 return -EBADMSG;
614         }
615
616         /* All other non-fatal errors */
617         return -EINVAL;
618 }
619
620 inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
621                                  int ring,
622                                  struct safexcel_result_desc *rdesc,
623                                  struct crypto_async_request *req)
624 {
625         int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
626
627         priv->ring[ring].rdr_req[i] = req;
628 }
629
630 inline struct crypto_async_request *
631 safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
632 {
633         int i = safexcel_ring_first_rdr_index(priv, ring);
634
635         return priv->ring[ring].rdr_req[i];
636 }
637
638 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
639 {
640         struct safexcel_command_desc *cdesc;
641
642         /* Acknowledge the command descriptors */
643         do {
644                 cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
645                 if (IS_ERR(cdesc)) {
646                         dev_err(priv->dev,
647                                 "Could not retrieve the command descriptor\n");
648                         return;
649                 }
650         } while (!cdesc->last_seg);
651 }
652
653 void safexcel_inv_complete(struct crypto_async_request *req, int error)
654 {
655         struct safexcel_inv_result *result = req->data;
656
657         if (error == -EINPROGRESS)
658                 return;
659
660         result->error = error;
661         complete(&result->completion);
662 }
663
664 int safexcel_invalidate_cache(struct crypto_async_request *async,
665                               struct safexcel_crypto_priv *priv,
666                               dma_addr_t ctxr_dma, int ring)
667 {
668         struct safexcel_command_desc *cdesc;
669         struct safexcel_result_desc *rdesc;
670         int ret = 0;
671
672         /* Prepare command descriptor */
673         cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
674         if (IS_ERR(cdesc))
675                 return PTR_ERR(cdesc);
676
677         cdesc->control_data.type = EIP197_TYPE_EXTENDED;
678         cdesc->control_data.options = 0;
679         cdesc->control_data.refresh = 0;
680         cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
681
682         /* Prepare result descriptor */
683         rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
684
685         if (IS_ERR(rdesc)) {
686                 ret = PTR_ERR(rdesc);
687                 goto cdesc_rollback;
688         }
689
690         safexcel_rdr_req_set(priv, ring, rdesc, async);
691
692         return ret;
693
694 cdesc_rollback:
695         safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
696
697         return ret;
698 }
699
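/*
 * Process the result descriptors the engine has completed for this ring:
 * complete the corresponding requests, acknowledge the processed descriptors
 * and update the ring's outstanding request accounting.
 */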
700 static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
701                                                      int ring)
702 {
703         struct crypto_async_request *req;
704         struct safexcel_context *ctx;
705         int ret, i, nreq, ndesc, tot_descs, handled = 0;
706         bool should_complete;
707
708 handle_results:
709         tot_descs = 0;
710
711         nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
712         nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
713         nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
714         if (!nreq)
715                 goto requests_left;
716
717         for (i = 0; i < nreq; i++) {
718                 req = safexcel_rdr_req_get(priv, ring);
719
720                 ctx = crypto_tfm_ctx(req->tfm);
721                 ndesc = ctx->handle_result(priv, ring, req,
722                                            &should_complete, &ret);
723                 if (ndesc < 0) {
724                         dev_err(priv->dev, "failed to handle result (%d)\n",
725                                 ndesc);
726                         goto acknowledge;
727                 }
728
729                 if (should_complete) {
730                         local_bh_disable();
731                         req->complete(req, ret);
732                         local_bh_enable();
733                 }
734
735                 tot_descs += ndesc;
736                 handled++;
737         }
738
739 acknowledge:
740         if (i)
741                 writel(EIP197_xDR_PROC_xD_PKT(i) |
742                        EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
743                        EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
744
745         /* If the number of requests overflowed the counter, try to process more
746          * requests.
747          */
748         if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
749                 goto handle_results;
750
751 requests_left:
752         spin_lock_bh(&priv->ring[ring].lock);
753
754         priv->ring[ring].requests -= handled;
755         safexcel_try_push_requests(priv, ring);
756
757         if (!priv->ring[ring].requests)
758                 priv->ring[ring].busy = false;
759
760         spin_unlock_bh(&priv->ring[ring].lock);
761 }
762
763 static void safexcel_dequeue_work(struct work_struct *work)
764 {
765         struct safexcel_work_data *data =
766                         container_of(work, struct safexcel_work_data, work);
767
768         safexcel_dequeue(data->priv, data->ring);
769 }
770
771 struct safexcel_ring_irq_data {
772         struct safexcel_crypto_priv *priv;
773         int ring;
774 };
775
776 static irqreturn_t safexcel_irq_ring(int irq, void *data)
777 {
778         struct safexcel_ring_irq_data *irq_data = data;
779         struct safexcel_crypto_priv *priv = irq_data->priv;
780         int ring = irq_data->ring, rc = IRQ_NONE;
781         u32 status, stat;
782
783         status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
784         if (!status)
785                 return rc;
786
787         /* RDR interrupts */
788         if (status & EIP197_RDR_IRQ(ring)) {
789                 stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
790
791                 if (unlikely(stat & EIP197_xDR_ERR)) {
792                         /*
793                          * Fatal error, the RDR is unusable and must be
794                          * reinitialized. This should not happen under
795                          * normal circumstances.
796                          */
797                         dev_err(priv->dev, "RDR: fatal error.\n");
798                 } else if (likely(stat & EIP197_xDR_THRESH)) {
799                         rc = IRQ_WAKE_THREAD;
800                 }
801
802                 /* ACK the interrupts */
803                 writel(stat & 0xff,
804                        EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
805         }
806
807         /* ACK the interrupts */
808         writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
809
810         return rc;
811 }
812
813 static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
814 {
815         struct safexcel_ring_irq_data *irq_data = data;
816         struct safexcel_crypto_priv *priv = irq_data->priv;
817         int ring = irq_data->ring;
818
819         safexcel_handle_result_descriptor(priv, ring);
820
821         queue_work(priv->ring[ring].workqueue,
822                    &priv->ring[ring].work_data.work);
823
824         return IRQ_HANDLED;
825 }
826
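/*
 * Look up and request a ring interrupt, either as a PCI MSI vector or as a
 * named platform IRQ, depending on the probed device type.
 */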
827 static int safexcel_request_ring_irq(void *pdev, int irqid,
828                                      int is_pci_dev,
829                                      irq_handler_t handler,
830                                      irq_handler_t threaded_handler,
831                                      struct safexcel_ring_irq_data *ring_irq_priv)
832 {
833         int ret, irq;
834         struct device *dev;
835
836         if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
837                 struct pci_dev *pci_pdev = pdev;
838
839                 dev = &pci_pdev->dev;
840                 irq = pci_irq_vector(pci_pdev, irqid);
841                 if (irq < 0) {
842                         dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
843                                 irqid, irq);
844                         return irq;
845                 }
846         } else if (IS_ENABLED(CONFIG_OF)) {
847                 struct platform_device *plf_pdev = pdev;
848                 char irq_name[6] = {0}; /* "ringX\0" */
849
850                 snprintf(irq_name, 6, "ring%d", irqid);
851                 dev = &plf_pdev->dev;
852                 irq = platform_get_irq_byname(plf_pdev, irq_name);
853
854                 if (irq < 0) {
855                         dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
856                                 irq_name, irq);
857                         return irq;
858                 }
859         } else {
                /* Neither PCI nor OF device: no way to look up the IRQ */
                return -ENXIO;
        }
860
861         ret = devm_request_threaded_irq(dev, irq, handler,
862                                         threaded_handler, IRQF_ONESHOT,
863                                         dev_name(dev), ring_irq_priv);
864         if (ret) {
865                 dev_err(dev, "unable to request IRQ %d\n", irq);
866                 return ret;
867         }
868
869         return irq;
870 }
871
872 static struct safexcel_alg_template *safexcel_algs[] = {
873         &safexcel_alg_ecb_des,
874         &safexcel_alg_cbc_des,
875         &safexcel_alg_ecb_des3_ede,
876         &safexcel_alg_cbc_des3_ede,
877         &safexcel_alg_ecb_aes,
878         &safexcel_alg_cbc_aes,
879         &safexcel_alg_ctr_aes,
880         &safexcel_alg_md5,
881         &safexcel_alg_sha1,
882         &safexcel_alg_sha224,
883         &safexcel_alg_sha256,
884         &safexcel_alg_sha384,
885         &safexcel_alg_sha512,
886         &safexcel_alg_hmac_md5,
887         &safexcel_alg_hmac_sha1,
888         &safexcel_alg_hmac_sha224,
889         &safexcel_alg_hmac_sha256,
890         &safexcel_alg_hmac_sha384,
891         &safexcel_alg_hmac_sha512,
892         &safexcel_alg_authenc_hmac_sha1_cbc_aes,
893         &safexcel_alg_authenc_hmac_sha224_cbc_aes,
894         &safexcel_alg_authenc_hmac_sha256_cbc_aes,
895         &safexcel_alg_authenc_hmac_sha384_cbc_aes,
896         &safexcel_alg_authenc_hmac_sha512_cbc_aes,
897         &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
898         &safexcel_alg_authenc_hmac_sha1_ctr_aes,
899         &safexcel_alg_authenc_hmac_sha224_ctr_aes,
900         &safexcel_alg_authenc_hmac_sha256_ctr_aes,
901         &safexcel_alg_authenc_hmac_sha384_ctr_aes,
902         &safexcel_alg_authenc_hmac_sha512_ctr_aes,
903 };
904
905 static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
906 {
907         int i, j, ret = 0;
908
909         for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
910                 safexcel_algs[i]->priv = priv;
911
912                 if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
913                         ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
914                 else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
915                         ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
916                 else
917                         ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);
918
919                 if (ret)
920                         goto fail;
921         }
922
923         return 0;
924
925 fail:
926         for (j = 0; j < i; j++) {
927                 if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
928                         crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
929                 else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
930                         crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
931                 else
932                         crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
933         }
934
935         return ret;
936 }
937
938 static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
939 {
940         int i;
941
942         for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
943                 if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
944                         crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
945                 else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
946                         crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
947                 else
948                         crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
949         }
950 }
951
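/*
 * Derive the driver configuration (number of processing engines and rings,
 * command/result descriptor sizes and offsets) from the engine's HIA options
 * register.
 */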
952 static void safexcel_configure(struct safexcel_crypto_priv *priv)
953 {
954         u32 val, mask = 0;
955
956         val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
957
958         /* Read number of PEs from the engine */
959         if (priv->version == EIP97IES_MRVL)
960                 /* Narrow field width for EIP97 type engine */
961                 mask = EIP97_N_PES_MASK;
962         else
963                 /* Wider field width for all EIP197 type engines */
964                 mask = EIP197_N_PES_MASK;
965
966         priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
967
968         priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
969
970         val = (val & GENMASK(27, 25)) >> 25;
971         mask = BIT(val) - 1;
972
973         priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
974         priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
975
976         priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
977         priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
978 }
979
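/* Select the register map matching the engine family (EIP97 vs. EIP197). */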
980 static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
981 {
982         struct safexcel_register_offsets *offsets = &priv->offsets;
983
984         if (priv->version == EIP97IES_MRVL) {
985                 offsets->hia_aic        = EIP97_HIA_AIC_BASE;
986                 offsets->hia_aic_g      = EIP97_HIA_AIC_G_BASE;
987                 offsets->hia_aic_r      = EIP97_HIA_AIC_R_BASE;
988                 offsets->hia_aic_xdr    = EIP97_HIA_AIC_xDR_BASE;
989                 offsets->hia_dfe        = EIP97_HIA_DFE_BASE;
990                 offsets->hia_dfe_thr    = EIP97_HIA_DFE_THR_BASE;
991                 offsets->hia_dse        = EIP97_HIA_DSE_BASE;
992                 offsets->hia_dse_thr    = EIP97_HIA_DSE_THR_BASE;
993                 offsets->hia_gen_cfg    = EIP97_HIA_GEN_CFG_BASE;
994                 offsets->pe             = EIP97_PE_BASE;
995         } else {
996                 offsets->hia_aic        = EIP197_HIA_AIC_BASE;
997                 offsets->hia_aic_g      = EIP197_HIA_AIC_G_BASE;
998                 offsets->hia_aic_r      = EIP197_HIA_AIC_R_BASE;
999                 offsets->hia_aic_xdr    = EIP197_HIA_AIC_xDR_BASE;
1000                 offsets->hia_dfe        = EIP197_HIA_DFE_BASE;
1001                 offsets->hia_dfe_thr    = EIP197_HIA_DFE_THR_BASE;
1002                 offsets->hia_dse        = EIP197_HIA_DSE_BASE;
1003                 offsets->hia_dse_thr    = EIP197_HIA_DSE_THR_BASE;
1004                 offsets->hia_gen_cfg    = EIP197_HIA_GEN_CFG_BASE;
1005                 offsets->pe             = EIP197_PE_BASE;
1006         }
1007 }
1008
1009 /*
1010  * Generic part of the probe routine, shared by the platform and PCI drivers.
1011  *
1012  * Assumes IO resources have been mapped, private data memory has been
1013  * allocated, clocks have been enabled, the device pointer has been assigned,
1014  * etc.
1015  */
1016 static int safexcel_probe_generic(void *pdev,
1017                                   struct safexcel_crypto_priv *priv,
1018                                   int is_pci_dev)
1019 {
1020         struct device *dev = priv->dev;
1021         int i, ret;
1022
1023         priv->context_pool = dmam_pool_create("safexcel-context", dev,
1024                                               sizeof(struct safexcel_context_record),
1025                                               1, 0);
1026         if (!priv->context_pool)
1027                 return -ENOMEM;
1028
1029         safexcel_init_register_offsets(priv);
1030
1031         if (priv->version != EIP97IES_MRVL)
1032                 priv->flags |= EIP197_TRC_CACHE;
1033
1034         safexcel_configure(priv);
1035
1036         if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
1037                 /*
1038                  * Request MSI vectors for global + 1 per ring -
1039                  * or just 1 for older dev images
1040                  */
1041                 struct pci_dev *pci_pdev = pdev;
1042
1043                 ret = pci_alloc_irq_vectors(pci_pdev,
1044                                             priv->config.rings + 1,
1045                                             priv->config.rings + 1,
1046                                             PCI_IRQ_MSI | PCI_IRQ_MSIX);
1047                 if (ret < 0) {
1048                         dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
1049                         return ret;
1050                 }
1051         }
1052
1053         /* Register the ring IRQ handlers and configure the rings */
1054         priv->ring = devm_kcalloc(dev, priv->config.rings,
1055                                   sizeof(*priv->ring),
1056                                   GFP_KERNEL);
1057         if (!priv->ring)
1058                 return -ENOMEM;
1059
1060         for (i = 0; i < priv->config.rings; i++) {
1061                 char wq_name[9] = {0};
1062                 int irq;
1063                 struct safexcel_ring_irq_data *ring_irq;
1064
1065                 ret = safexcel_init_ring_descriptors(priv,
1066                                                      &priv->ring[i].cdr,
1067                                                      &priv->ring[i].rdr);
1068                 if (ret) {
1069                         dev_err(dev, "Failed to initialize rings\n");
1070                         return ret;
1071                 }
1072
1073                 priv->ring[i].rdr_req = devm_kcalloc(dev,
1074                         EIP197_DEFAULT_RING_SIZE,
1075                         sizeof(priv->ring[i].rdr_req),
1076                         GFP_KERNEL);
1077                 if (!priv->ring[i].rdr_req)
1078                         return -ENOMEM;
1079
1080                 ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
1081                 if (!ring_irq)
1082                         return -ENOMEM;
1083
1084                 ring_irq->priv = priv;
1085                 ring_irq->ring = i;
1086
1087                 irq = safexcel_request_ring_irq(pdev,
1088                                                 EIP197_IRQ_NUMBER(i, is_pci_dev),
1089                                                 is_pci_dev,
1090                                                 safexcel_irq_ring,
1091                                                 safexcel_irq_ring_thread,
1092                                                 ring_irq);
1093                 if (irq < 0) {
1094                         dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
1095                         return irq;
1096                 }
1097
1098                 priv->ring[i].work_data.priv = priv;
1099                 priv->ring[i].work_data.ring = i;
1100                 INIT_WORK(&priv->ring[i].work_data.work,
1101                           safexcel_dequeue_work);
1102
1103                 snprintf(wq_name, 9, "wq_ring%d", i);
1104                 priv->ring[i].workqueue =
1105                         create_singlethread_workqueue(wq_name);
1106                 if (!priv->ring[i].workqueue)
1107                         return -ENOMEM;
1108
1109                 priv->ring[i].requests = 0;
1110                 priv->ring[i].busy = false;
1111
1112                 crypto_init_queue(&priv->ring[i].queue,
1113                                   EIP197_DEFAULT_RING_SIZE);
1114
1115                 spin_lock_init(&priv->ring[i].lock);
1116                 spin_lock_init(&priv->ring[i].queue_lock);
1117         }
1118
1119         atomic_set(&priv->ring_used, 0);
1120
1121         ret = safexcel_hw_init(priv);
1122         if (ret) {
1123                 dev_err(dev, "HW init failed (%d)\n", ret);
1124                 return ret;
1125         }
1126
1127         ret = safexcel_register_algorithms(priv);
1128         if (ret) {
1129                 dev_err(dev, "Failed to register algorithms (%d)\n", ret);
1130                 return ret;
1131         }
1132
1133         return 0;
1134 }
1135
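/*
 * Quiesce the descriptor rings: acknowledge any pending ring interrupts and
 * clear the command and result ring base addresses.
 */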
1136 static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
1137 {
1138         int i;
1139
1140         for (i = 0; i < priv->config.rings; i++) {
1141                 /* clear any pending interrupt */
1142                 writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
1143                 writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
1144
1145                 /* Reset the CDR base address */
1146                 writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
1147                 writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
1148
1149                 /* Reset the RDR base address */
1150                 writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
1151                 writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
1152         }
1153 }
1154
1155 #if IS_ENABLED(CONFIG_OF)
1156 /* for Device Tree platform driver */
1157
1158 static int safexcel_probe(struct platform_device *pdev)
1159 {
1160         struct device *dev = &pdev->dev;
1161         struct safexcel_crypto_priv *priv;
1162         int ret;
1163
1164         priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1165         if (!priv)
1166                 return -ENOMEM;
1167
1168         priv->dev = dev;
1169         priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
1170
1171         platform_set_drvdata(pdev, priv);
1172
1173         priv->base = devm_platform_ioremap_resource(pdev, 0);
1174         if (IS_ERR(priv->base)) {
1175                 dev_err(dev, "failed to get resource\n");
1176                 return PTR_ERR(priv->base);
1177         }
1178
1179         priv->clk = devm_clk_get(&pdev->dev, NULL);
1180         ret = PTR_ERR_OR_ZERO(priv->clk);
1181         /* The clock isn't mandatory */
1182         if (ret != -ENOENT) {
1183                 if (ret)
1184                         return ret;
1185
1186                 ret = clk_prepare_enable(priv->clk);
1187                 if (ret) {
1188                         dev_err(dev, "unable to enable clk (%d)\n", ret);
1189                         return ret;
1190                 }
1191         }
1192
1193         priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
1194         ret = PTR_ERR_OR_ZERO(priv->reg_clk);
1195         /* The clock isn't mandatory */
1196         if (ret != -ENOENT) {
1197                 if (ret)
1198                         goto err_core_clk;
1199
1200                 ret = clk_prepare_enable(priv->reg_clk);
1201                 if (ret) {
1202                         dev_err(dev, "unable to enable reg clk (%d)\n", ret);
1203                         goto err_core_clk;
1204                 }
1205         }
1206
1207         ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
1208         if (ret)
1209                 goto err_reg_clk;
1210
1211         /* Generic EIP97/EIP197 device probing */
1212         ret = safexcel_probe_generic(pdev, priv, 0);
1213         if (ret)
1214                 goto err_reg_clk;
1215
1216         return 0;
1217
1218 err_reg_clk:
1219         clk_disable_unprepare(priv->reg_clk);
1220 err_core_clk:
1221         clk_disable_unprepare(priv->clk);
1222         return ret;
1223 }
1224
1225 static int safexcel_remove(struct platform_device *pdev)
1226 {
1227         struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
1228         int i;
1229
1230         safexcel_unregister_algorithms(priv);
1231         safexcel_hw_reset_rings(priv);
1232
1233         clk_disable_unprepare(priv->clk);
1234
1235         for (i = 0; i < priv->config.rings; i++)
1236                 destroy_workqueue(priv->ring[i].workqueue);
1237
1238         return 0;
1239 }
1240
1241 static const struct of_device_id safexcel_of_match_table[] = {
1242         {
1243                 .compatible = "inside-secure,safexcel-eip97ies",
1244                 .data = (void *)EIP97IES_MRVL,
1245         },
1246         {
1247                 .compatible = "inside-secure,safexcel-eip197b",
1248                 .data = (void *)EIP197B_MRVL,
1249         },
1250         {
1251                 .compatible = "inside-secure,safexcel-eip197d",
1252                 .data = (void *)EIP197D_MRVL,
1253         },
1254         /* For backward compatibility and intended for generic use */
1255         {
1256                 .compatible = "inside-secure,safexcel-eip97",
1257                 .data = (void *)EIP97IES_MRVL,
1258         },
1259         {
1260                 .compatible = "inside-secure,safexcel-eip197",
1261                 .data = (void *)EIP197B_MRVL,
1262         },
1263         {},
1264 };
1265
1266 static struct platform_driver  crypto_safexcel = {
1267         .probe          = safexcel_probe,
1268         .remove         = safexcel_remove,
1269         .driver         = {
1270                 .name   = "crypto-safexcel",
1271                 .of_match_table = safexcel_of_match_table,
1272         },
1273 };
1274 #endif
1275
1276 #if IS_ENABLED(CONFIG_PCI)
1277 /* PCIE devices - i.e. Inside Secure development boards */
1278
1279 static int safexcel_pci_probe(struct pci_dev *pdev,
1280                                const struct pci_device_id *ent)
1281 {
1282         struct device *dev = &pdev->dev;
1283         struct safexcel_crypto_priv *priv;
1284         void __iomem *pciebase;
1285         int rc;
1286         u32 val;
1287
1288         dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
1289                 ent->vendor, ent->device, ent->subvendor,
1290                 ent->subdevice, ent->driver_data);
1291
1292         priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1293         if (!priv)
1294                 return -ENOMEM;
1295
1296         priv->dev = dev;
1297         priv->version = (enum safexcel_eip_version)ent->driver_data;
1298
1299         pci_set_drvdata(pdev, priv);
1300
1301         /* enable the device */
1302         rc = pcim_enable_device(pdev);
1303         if (rc) {
1304                 dev_err(dev, "Failed to enable PCI device\n");
1305                 return rc;
1306         }
1307
1308         /* take ownership of PCI BAR0 */
1309         rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
1310         if (rc) {
1311                 dev_err(dev, "Failed to map IO region for BAR0\n");
1312                 return rc;
1313         }
1314         priv->base = pcim_iomap_table(pdev)[0];
1315
1316         if (priv->version == EIP197_DEVBRD) {
1317                 dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");
1318
1319                 rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
1320                 if (rc) {
1321                         dev_err(dev, "Failed to map IO region for BAR4\n");
1322                         return rc;
1323                 }
1324
1325                 pciebase = pcim_iomap_table(pdev)[2];
1326                 val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
1327                 if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
1328                         dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
1329                                 (val & 0xff));
1330
1331                         /* Setup MSI identity map mapping */
1332                         writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
1333                                pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
1334                         writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
1335                                pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
1336                         writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
1337                                pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
1338                         writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
1339                                pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);
1340
1341                         /* Enable all device interrupts */
1342                         writel(GENMASK(31, 0),
1343                                pciebase + EIP197_XLX_USER_INT_ENB_MSK);
1344                 } else {
1345                         dev_err(dev, "Unrecognised IRQ block identifier %x\n",
1346                                 val);
1347                         return -ENODEV;
1348                 }
1349
1350                 /* HW reset FPGA dev board */
1351                 /* assert reset */
1352                 writel(1, priv->base + EIP197_XLX_GPIO_BASE);
1353                 wmb(); /* maintain strict ordering for accesses here */
1354                 /* deassert reset */
1355                 writel(0, priv->base + EIP197_XLX_GPIO_BASE);
1356                 wmb(); /* maintain strict ordering for accesses here */
1357         }
1358
1359         /* enable bus mastering */
1360         pci_set_master(pdev);
1361
1362         /* Generic EIP97/EIP197 device probing */
1363         rc = safexcel_probe_generic(pdev, priv, 1);
1364         return rc;
1365 }
1366
1367 static void safexcel_pci_remove(struct pci_dev *pdev)
1368 {
1369         struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
1370         int i;
1371
1372         safexcel_unregister_algorithms(priv);
1373
1374         for (i = 0; i < priv->config.rings; i++)
1375                 destroy_workqueue(priv->ring[i].workqueue);
1376
1377         safexcel_hw_reset_rings(priv);
1378 }
1379
1380 static const struct pci_device_id safexcel_pci_ids[] = {
1381         {
1382                 PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
1383                                0x16ae, 0xc522),
1384                 /* assume EIP197B for now */
1385                 .driver_data = EIP197_DEVBRD,
1386         },
1387         {},
1388 };
1389
1390 MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);
1391
1392 static struct pci_driver safexcel_pci_driver = {
1393         .name          = "crypto-safexcel",
1394         .id_table      = safexcel_pci_ids,
1395         .probe         = safexcel_pci_probe,
1396         .remove        = safexcel_pci_remove,
1397 };
1398 #endif
1399
1400 static int __init safexcel_init(void)
1401 {
1402         int rc;
1403
1404 #if IS_ENABLED(CONFIG_OF)
1405         /* Register platform driver */
1406         platform_driver_register(&crypto_safexcel);
1407 #endif
1408
1409 #if IS_ENABLED(CONFIG_PCI)
1410         /* Register PCI driver */
1411         rc = pci_register_driver(&safexcel_pci_driver);
1412 #endif
1413
1414         return 0;
1415 }
1416
1417 static void __exit safexcel_exit(void)
1418 {
1419 #if IS_ENABLED(CONFIG_OF)
1420         /* Unregister platform driver */
1421         platform_driver_unregister(&crypto_safexcel);
1422 #endif
1423
1424 #if IS_ENABLED(CONFIG_PCI)
1425         /* Unregister PCI driver if successfully registered before */
1426         pci_unregister_driver(&safexcel_pci_driver);
1427 #endif
1428 }
1429
1430 module_init(safexcel_init);
1431 module_exit(safexcel_exit);
1432
1433 MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
1434 MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
1435 MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
1436 MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
1437 MODULE_LICENSE("GPL v2");