drivers/crypto/hisilicon/sec2/sec_main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>

#include "sec.h"

#define SEC_VF_NUM                      63
#define SEC_QUEUE_NUM_V1                4096
#define SEC_PF_PCI_DEVICE_ID            0xa255
#define SEC_VF_PCI_DEVICE_ID            0xa256

#define SEC_BD_ERR_CHK_EN0              0xEFFFFFFF
#define SEC_BD_ERR_CHK_EN1              0x7ffff7fd
#define SEC_BD_ERR_CHK_EN3              0xffffbfff

#define SEC_SQE_SIZE                    128
#define SEC_SQ_SIZE                     (SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM                256
#define SEC_PF_DEF_Q_BASE               0
#define SEC_CTX_Q_NUM_DEF               2
#define SEC_CTX_Q_NUM_MAX               32

#define SEC_CTRL_CNT_CLR_CE             0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
#define SEC_CORE_INT_SOURCE             0x301010
#define SEC_CORE_INT_MASK               0x301000
#define SEC_CORE_INT_STATUS             0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO      0x301C14
#define SEC_ECC_NUM                     16
#define SEC_ECC_MASH                    0xFF
#define SEC_CORE_INT_DISABLE            0x0
#define SEC_CORE_INT_ENABLE             0x1ff
#define SEC_CORE_INT_CLEAR              0x1ff
#define SEC_SAA_ENABLE                  0x17f

#define SEC_RAS_CE_REG                  0x301050
#define SEC_RAS_FE_REG                  0x301054
#define SEC_RAS_NFE_REG                 0x301058
#define SEC_RAS_CE_ENB_MSK              0x88
#define SEC_RAS_FE_ENB_MSK              0x0
#define SEC_RAS_NFE_ENB_MSK             0x177
#define SEC_RAS_DISABLE         0x0
#define SEC_MEM_START_INIT_REG  0x301100
#define SEC_MEM_INIT_DONE_REG           0x301104

#define SEC_CONTROL_REG         0x301200
#define SEC_TRNG_EN_SHIFT               8
#define SEC_CLK_GATE_ENABLE             BIT(3)
#define SEC_CLK_GATE_DISABLE            (~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE        0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG    0x301220
#define SEC_INTERFACE_USER_CTRL1_REG    0x301224
#define SEC_SAA_EN_REG                  0x301270
#define SEC_BD_ERR_CHK_EN_REG0          0x301380
#define SEC_BD_ERR_CHK_EN_REG1          0x301384
#define SEC_BD_ERR_CHK_EN_REG3          0x30138c

#define SEC_USER0_SMMU_NORMAL           (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL           (BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_USER1_ENABLE_CONTEXT_SSV    BIT(24)
#define SEC_USER1_ENABLE_DATA_SSV       BIT(16)
#define SEC_USER1_WB_CONTEXT_SSV        BIT(8)
#define SEC_USER1_WB_DATA_SSV           BIT(0)
#define SEC_USER1_SVA_SET               (SEC_USER1_ENABLE_CONTEXT_SSV | \
                                        SEC_USER1_ENABLE_DATA_SSV | \
                                        SEC_USER1_WB_CONTEXT_SSV |  \
                                        SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA              (SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK             (~SEC_USER1_SVA_SET)
#define SEC_CORE_INT_STATUS_M_ECC       BIT(2)

#define SEC_DELAY_10_US                 10
#define SEC_POLL_TIMEOUT_US             1000
#define SEC_DBGFS_VAL_MAX_LEN           20
#define SEC_SINGLE_PORT_MAX_TRANS       0x2060

#define SEC_SQE_MASK_OFFSET             64
#define SEC_SQE_MASK_LEN                48

struct sec_hw_error {
        u32 int_msk;
        const char *msg;
};

struct sec_dfx_item {
        const char *name;
        u32 offset;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;

static struct hisi_qm_list sec_devices = {
        .register_to_crypto     = sec_register_to_crypto,
        .unregister_from_crypto = sec_unregister_from_crypto,
};

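/*
 * SEC_CORE_INT_STATUS bit meanings; the empty entry terminates the
 * table (sec_log_hw_error() stops at the first NULL msg).
 */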
static const struct sec_hw_error sec_hw_errors[] = {
        {
                .int_msk = BIT(0),
                .msg = "sec_axi_rresp_err_rint"
        },
        {
                .int_msk = BIT(1),
                .msg = "sec_axi_bresp_err_rint"
        },
        {
                .int_msk = BIT(2),
                .msg = "sec_ecc_2bit_err_rint"
        },
        {
                .int_msk = BIT(3),
                .msg = "sec_ecc_1bit_err_rint"
        },
        {
                .int_msk = BIT(4),
                .msg = "sec_req_trng_timeout_rint"
        },
        {
                .int_msk = BIT(5),
                .msg = "sec_fsm_hbeat_rint"
        },
        {
                .int_msk = BIT(6),
                .msg = "sec_channel_req_rng_timeout_rint"
        },
        {
                .int_msk = BIT(7),
                .msg = "sec_bd_err_rint"
        },
        {
                .int_msk = BIT(8),
                .msg = "sec_chain_buff_err_rint"
        },
        {}
};

static const char * const sec_dbg_file_name[] = {
        [SEC_CURRENT_QM] = "current_qm",
        [SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct sec_dfx_item sec_dfx_labels[] = {
        {"send_cnt", offsetof(struct sec_dfx, send_cnt)},
        {"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
        {"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
        {"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
        {"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
        {"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
        {"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};

static const struct debugfs_reg32 sec_dfx_regs[] = {
        {"SEC_PF_ABNORMAL_INT_SOURCE    ",  0x301010},
        {"SEC_SAA_EN                    ",  0x301270},
        {"SEC_BD_LATENCY_MIN            ",  0x301600},
        {"SEC_BD_LATENCY_MAX            ",  0x301608},
        {"SEC_BD_LATENCY_AVG            ",  0x30160C},
        {"SEC_BD_NUM_IN_SAA0            ",  0x301670},
        {"SEC_BD_NUM_IN_SAA1            ",  0x301674},
        {"SEC_BD_NUM_IN_SEC             ",  0x301680},
        {"SEC_ECC_1BIT_CNT              ",  0x301C00},
        {"SEC_ECC_1BIT_INFO             ",  0x301C04},
        {"SEC_ECC_2BIT_CNT              ",  0x301C10},
        {"SEC_ECC_2BIT_INFO             ",  0x301C14},
        {"SEC_BD_SAA0                   ",  0x301C20},
        {"SEC_BD_SAA1                   ",  0x301C24},
        {"SEC_BD_SAA2                   ",  0x301C28},
        {"SEC_BD_SAA3                   ",  0x301C2C},
        {"SEC_BD_SAA4                   ",  0x301C30},
        {"SEC_BD_SAA5                   ",  0x301C34},
        {"SEC_BD_SAA6                   ",  0x301C38},
        {"SEC_BD_SAA7                   ",  0x301C3C},
        {"SEC_BD_SAA8                   ",  0x301C40},
};

static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
        return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
        .set = sec_pf_q_num_set,
        .get = param_get_int,
};

static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF (v1 2-4096, v2 2-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
        u32 ctx_q_num;
        int ret;

        if (!val)
                return -EINVAL;

        ret = kstrtou32(val, 10, &ctx_q_num);
        if (ret)
                return -EINVAL;

        if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
                pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
                return -EINVAL;
        }

        return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
        .set = sec_ctx_q_num_set,
        .get = param_get_int,
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Number of queues per ctx (even values 2-32, default 2)");

static const struct kernel_param_ops vfs_num_ops = {
        .set = vfs_num_set,
        .get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (1-63), 0 (default)");

void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
        hisi_qm_free_qps(qps, qp_num);
        kfree(qps);
}

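/*
 * Allocate the ctx_q_num queue pairs used by one crypto context,
 * preferring the NUMA node of the calling CPU; returns NULL on failure.
 */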
struct hisi_qp **sec_create_qps(void)
{
        int node = cpu_to_node(smp_processor_id());
        u32 ctx_num = ctx_q_num;
        struct hisi_qp **qps;
        int ret;

        qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
        if (!qps)
                return NULL;

        ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
        if (!ret)
                return qps;

        kfree(qps);
        return NULL;
}

static const struct kernel_param_ops sec_uacce_mode_ops = {
        .set = uacce_mode_set,
        .get = param_get_int,
};

/*
 * uacce_mode = 0 means sec registers to crypto only,
 * uacce_mode = 1 means sec registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
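
/*
 * Example module load (illustrative parameter values only; see the
 * per-parameter descriptions above for the valid ranges):
 *   modprobe hisi_sec2 pf_q_num=256 ctx_q_num=4 vfs_num=16 uacce_mode=0
 */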

static const struct pci_device_id sec_dev_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
        { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

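/*
 * BD endianness is encoded in SEC_CONTROL_REG bits [1:0]: bit 0 clear
 * means little endian, bit 0 set with bit 1 clear means 32-bit big
 * endian, and both bits set mean 64-bit big endian.
 */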
static u8 sec_get_endian(struct hisi_qm *qm)
{
        u32 reg;

        /*
         * Reading an engine register is not a valid way for a VF to
         * get the endian setting, so default to little endian here.
         */
        if (qm->pdev->is_virtfn) {
                dev_err_ratelimited(&qm->pdev->dev,
                                    "cannot access a register in VF!\n");
                return SEC_LE;
        }
        reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
        /* BD little endian mode */
        if (!(reg & BIT(0)))
                return SEC_LE;

        /* BD 32-bit big endian mode */
        else if (!(reg & BIT(1)))
                return SEC_32BE;

        /* BD 64-bit big endian mode */
        else
                return SEC_64BE;
}

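/*
 * One-time engine bring-up: ungate the clocks, trigger the internal
 * memory init and poll for completion, enable the TRNG, program the
 * SMMU user fields (SVA variant on HW v2 when qm->use_sva is set),
 * configure the BD error-check masks, and latch the BD endianness.
 */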
static int sec_engine_init(struct hisi_qm *qm)
{
        int ret;
        u32 reg;

        /* disable clock gate control */
        reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
        reg &= SEC_CLK_GATE_DISABLE;
        writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);

        writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);

        ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
                                         reg, reg & 0x1, SEC_DELAY_10_US,
                                         SEC_POLL_TIMEOUT_US);
        if (ret) {
                pci_err(qm->pdev, "fail to init sec mem\n");
                return ret;
        }

        reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
        reg |= (0x1 << SEC_TRNG_EN_SHIFT);
        writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);

        reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
        reg |= SEC_USER0_SMMU_NORMAL;
        writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);

        reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
        reg &= SEC_USER1_SMMU_MASK;
        if (qm->use_sva && qm->ver == QM_HW_V2)
                reg |= SEC_USER1_SMMU_SVA;
        else
                reg |= SEC_USER1_SMMU_NORMAL;
        writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);

        writel(SEC_SINGLE_PORT_MAX_TRANS,
               qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);

        writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);

        /* Enable SM4 extra modes, such as CTR/ECB */
        writel_relaxed(SEC_BD_ERR_CHK_EN0,
                       qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
        /* Enable SM4-XTS mode with multiple IVs */
        writel_relaxed(SEC_BD_ERR_CHK_EN1,
                       qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
        writel_relaxed(SEC_BD_ERR_CHK_EN3,
                       qm->io_base + SEC_BD_ERR_CHK_EN_REG3);

        /* config endian */
        reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
        reg |= sec_get_endian(qm);
        writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);

        return 0;
}

static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
        /* qm user domain */
        writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
        writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
        writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
        writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
        writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);

        /* qm cache */
        writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
        writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);

        /* disable FLR triggered by BME (bus master enable) */
        writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
        writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);

        /* enable sqc,cqc writeback */
        writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
               CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
               FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

        return sec_engine_init(qm);
}

/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
        int i;

        /* clear current_qm */
        writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
        writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);

        /* clear sec dfx regs */
        writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
        for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
                readl(qm->io_base + sec_dfx_regs[i].offset);

        /* clear rdclr_en */
        writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

        hisi_qm_debug_regs_clear(qm);
}

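/*
 * Route SEC core errors to the RAS CE/FE/NFE classes per the
 * SEC_RAS_*_ENB_MSK masks and set SEC_AXI_SHUTDOWN_ENABLE so the AXI
 * master is shut down when a multi-bit ECC error occurs (HW v2+ only;
 * v1 keeps all core interrupts masked).
 */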
static void sec_hw_error_enable(struct hisi_qm *qm)
{
        u32 val;

        if (qm->ver == QM_HW_V1) {
                writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
                pci_info(qm->pdev, "V1 does not support hw error handling\n");
                return;
        }

        val = readl(qm->io_base + SEC_CONTROL_REG);

        /* clear SEC hw error source if any */
        writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);

        /* enable SEC hw error interrupts */
        writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);

        /* enable RAS int */
        writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
        writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
        writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);

        /* enable SEC block master OOO when an m-bit error occurs */
        val = val | SEC_AXI_SHUTDOWN_ENABLE;

        writel(val, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_disable(struct hisi_qm *qm)
{
        u32 val;

        val = readl(qm->io_base + SEC_CONTROL_REG);

        /* disable RAS int */
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);

        /* disable SEC hw error interrupts */
        writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

        /* disable SEC block master OOO when an m-bit error occurs */
        val = val & SEC_AXI_SHUTDOWN_DISABLE;

        writel(val, qm->io_base + SEC_CONTROL_REG);
}

static u32 sec_current_qm_read(struct sec_debug_file *file)
{
        struct hisi_qm *qm = file->qm;

        return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

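/*
 * Select which function's DFX counters the debugfs files report:
 * 0 targets the PF itself, 1..vfs_num targets a VF. The queues left
 * over after the PF's share are split evenly across the VFs, with the
 * last VF also absorbing any remainder.
 */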
static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
        struct hisi_qm *qm = file->qm;
        u32 vfq_num;
        u32 tmp;

        if (val > qm->vfs_num)
                return -EINVAL;

        /* Calculate curr_qm_qp_num from the PF/VF device ID and store it */
        if (!val) {
                qm->debug.curr_qm_qp_num = qm->qp_num;
        } else {
                vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;

                if (val == qm->vfs_num)
                        qm->debug.curr_qm_qp_num =
                                qm->ctrl_qp_num - qm->qp_num -
                                (qm->vfs_num - 1) * vfq_num;
                else
                        qm->debug.curr_qm_qp_num = vfq_num;
        }

        writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
        writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

        tmp = val |
              (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
        writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

        tmp = val |
              (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
        writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

        return 0;
}

static u32 sec_clear_enable_read(struct sec_debug_file *file)
{
        struct hisi_qm *qm = file->qm;

        return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
                        SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
{
        struct hisi_qm *qm = file->qm;
        u32 tmp;

        if (val != 1 && val)
                return -EINVAL;

        tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
               ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
        writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

        return 0;
}

static ssize_t sec_debug_read(struct file *filp, char __user *buf,
                               size_t count, loff_t *pos)
{
        struct sec_debug_file *file = filp->private_data;
        char tbuf[SEC_DBGFS_VAL_MAX_LEN];
        u32 val;
        int ret;

        spin_lock_irq(&file->lock);

        switch (file->index) {
        case SEC_CURRENT_QM:
                val = sec_current_qm_read(file);
                break;
        case SEC_CLEAR_ENABLE:
                val = sec_clear_enable_read(file);
                break;
        default:
                spin_unlock_irq(&file->lock);
                return -EINVAL;
        }

        spin_unlock_irq(&file->lock);
        ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);

        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
                               size_t count, loff_t *pos)
{
        struct sec_debug_file *file = filp->private_data;
        char tbuf[SEC_DBGFS_VAL_MAX_LEN];
        unsigned long val;
        int len, ret;

        if (*pos != 0)
                return 0;

        if (count >= SEC_DBGFS_VAL_MAX_LEN)
                return -ENOSPC;

        len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
                                     pos, buf, count);
        if (len < 0)
                return len;

        tbuf[len] = '\0';
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;

        spin_lock_irq(&file->lock);

        switch (file->index) {
        case SEC_CURRENT_QM:
                ret = sec_current_qm_write(file, val);
                if (ret)
                        goto err_input;
                break;
        case SEC_CLEAR_ENABLE:
                ret = sec_clear_enable_write(file, val);
                if (ret)
                        goto err_input;
                break;
        default:
                ret = -EINVAL;
                goto err_input;
        }

        spin_unlock_irq(&file->lock);

        return count;

 err_input:
        spin_unlock_irq(&file->lock);
        return ret;
}

static const struct file_operations sec_dbg_fops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = sec_debug_read,
        .write = sec_debug_write,
};

static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
        *val = atomic64_read((atomic64_t *)data);

        return 0;
}

static int sec_debugfs_atomic64_set(void *data, u64 val)
{
        if (val)
                return -EINVAL;

        atomic64_set((atomic64_t *)data, 0);

        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
                         sec_debugfs_atomic64_set, "%lld\n");

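/*
 * Create the "sec_dfx" debugfs directory under this device's debug
 * root: a read-only "regs" dump of sec_dfx_regs (PF only) plus one
 * file per sec_dfx_labels counter, which reads the atomic64 value and
 * accepts only a write of 0 to reset it.
 */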
static int sec_core_debug_init(struct hisi_qm *qm)
{
        struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
        struct device *dev = &qm->pdev->dev;
        struct sec_dfx *dfx = &sec->debug.dfx;
        struct debugfs_regset32 *regset;
        struct dentry *tmp_d;
        int i;

        tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);

        regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
        if (!regset)
                return -ENOMEM;

        regset->regs = sec_dfx_regs;
        regset->nregs = ARRAY_SIZE(sec_dfx_regs);
        regset->base = qm->io_base;

        if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
                debugfs_create_regset32("regs", 0444, tmp_d, regset);

        for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
                atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
                                        sec_dfx_labels[i].offset);
                debugfs_create_file(sec_dfx_labels[i].name, 0644,
                                   tmp_d, data, &sec_atomic64_ops);
        }

        return 0;
}

static int sec_debug_init(struct hisi_qm *qm)
{
        struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
        int i;

        if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
                for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
                        spin_lock_init(&sec->debug.files[i].lock);
                        sec->debug.files[i].index = i;
                        sec->debug.files[i].qm = qm;

                        debugfs_create_file(sec_dbg_file_name[i], 0600,
                                                  qm->debug.debug_root,
                                                  sec->debug.files + i,
                                                  &sec_dbg_fops);
                }
        }

        return sec_core_debug_init(qm);
}

static int sec_debugfs_init(struct hisi_qm *qm)
{
        struct device *dev = &qm->pdev->dev;
        int ret;

        qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
                                                  sec_debugfs_root);
        qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
        qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
        hisi_qm_debug_init(qm);

        ret = sec_debug_init(qm);
        if (ret)
                goto failed_to_create;

        return 0;

failed_to_create:
        debugfs_remove_recursive(sec_debugfs_root);
        return ret;
}

static void sec_debugfs_exit(struct hisi_qm *qm)
{
        debugfs_remove_recursive(qm->debug.debug_root);
}

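/*
 * Walk sec_hw_errors and log each bit set in err_sts; for the
 * multi-bit ECC error, additionally read SEC_CORE_SRAM_ECC_ERR_INFO
 * and log the reported SRAM number.
 */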
static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
        const struct sec_hw_error *errs = sec_hw_errors;
        struct device *dev = &qm->pdev->dev;
        u32 err_val;

        while (errs->msg) {
                if (errs->int_msk & err_sts) {
                        dev_err(dev, "%s [error status=0x%x] found\n",
                                        errs->msg, errs->int_msk);

                        if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
                                err_val = readl(qm->io_base +
                                                SEC_CORE_SRAM_ECC_ERR_INFO);
                                dev_err(dev, "multi ecc sram num=0x%x\n",
                                                ((err_val) >> SEC_ECC_NUM) &
                                                SEC_ECC_MASH);
                        }
                }
                errs++;
        }
}

static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
        return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
        writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}

static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
        u32 val;

        val = readl(qm->io_base + SEC_CONTROL_REG);
        writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
        writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}

static const struct hisi_qm_err_ini sec_err_ini = {
        .hw_init                = sec_set_user_domain_and_cache,
        .hw_err_enable          = sec_hw_error_enable,
        .hw_err_disable         = sec_hw_error_disable,
        .get_dev_hw_err_status  = sec_get_hw_err_status,
        .clear_dev_hw_err_status = sec_clear_hw_err_status,
        .log_dev_hw_err         = sec_log_hw_error,
        .open_axi_master_ooo    = sec_open_axi_master_ooo,
        .err_info               = {
                .ce             = QM_BASE_CE,
                .nfe            = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
                                  QM_ACC_WB_NOT_READY_TIMEOUT,
                .fe             = 0,
                .ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
                .dev_ce_mask    = SEC_RAS_CE_ENB_MSK,
                .msi_wr_port    = BIT(0),
                .acpi_rst       = "SRST",
        }
};

static int sec_pf_probe_init(struct sec_dev *sec)
{
        struct hisi_qm *qm = &sec->qm;
        int ret;

        qm->err_ini = &sec_err_ini;

        ret = sec_set_user_domain_and_cache(qm);
        if (ret)
                return ret;

        hisi_qm_dev_err_init(qm);
        sec_debug_regs_clear(qm);

        return 0;
}

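/*
 * Common QM setup for PF and VF. The PF takes pf_q_num queues starting
 * at SEC_PF_DEF_Q_BASE; a VF on v1 hardware gets a fixed layout (see
 * the comment in the VF branch below), while on v2 the VF queue
 * configuration is obtained at QM init time.
 */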
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
        int ret;

        qm->pdev = pdev;
        qm->ver = pdev->revision;
        qm->algs = "cipher\ndigest\naead\n";
        qm->mode = uacce_mode;
        qm->sqe_size = SEC_SQE_SIZE;
        qm->dev_name = sec_name;

        qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
                        QM_HW_PF : QM_HW_VF;
        if (qm->fun_type == QM_HW_PF) {
                qm->qp_base = SEC_PF_DEF_Q_BASE;
                qm->qp_num = pf_q_num;
                qm->debug.curr_qm_qp_num = pf_q_num;
                qm->qm_list = &sec_devices;
        } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
                /*
                 * A VF has no way to read the QM configuration on v1
                 * hardware, so force the PF to use SEC_PF_DEF_Q_NUM
                 * queues and hand the remaining queues to a single VF.
                 * v2 hardware has no such limitation.
                 */
                qm->qp_base = SEC_PF_DEF_Q_NUM;
                qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
        }

        /*
         * WQ_HIGHPRI: SEC requests are latency sensitive, so they
         * need a high priority workqueue.
         * WQ_UNBOUND: SEC tasks tend to be long-running, CPU
         * intensive workloads.
         */
        qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
                                 WQ_UNBOUND, num_online_cpus(),
                                 pci_name(qm->pdev));
        if (!qm->wq) {
                pci_err(qm->pdev, "fail to alloc workqueue\n");
                return -ENOMEM;
        }

        ret = hisi_qm_init(qm);
        if (ret)
                destroy_workqueue(qm->wq);

        return ret;
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
        hisi_qm_uninit(qm);
}

static int sec_probe_init(struct sec_dev *sec)
{
        struct hisi_qm *qm = &sec->qm;
        int ret;

        if (qm->fun_type == QM_HW_PF) {
                ret = sec_pf_probe_init(sec);
                if (ret)
                        return ret;
        }

        return 0;
}

static void sec_probe_uninit(struct hisi_qm *qm)
{
        hisi_qm_dev_err_uninit(qm);

        destroy_workqueue(qm->wq);
}

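/*
 * Record whether the device sits behind a translating (paging) IOMMU
 * domain; an identity-mapped or absent domain leaves iommu_used false.
 */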
static void sec_iommu_used_check(struct sec_dev *sec)
{
        struct iommu_domain *domain;
        struct device *dev = &sec->qm.pdev->dev;

        domain = iommu_get_domain_for_dev(dev);

        /* Check if iommu is used */
        sec->iommu_used = false;
        if (domain) {
                if (domain->type & __IOMMU_DOMAIN_PAGING)
                        sec->iommu_used = true;
                dev_info(dev, "SMMU Opened, the iommu type = %u\n",
                        domain->type);
        }
}

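/*
 * Probe order: init the QM, run the PF-only engine/error setup, start
 * the QM, create debugfs (failure is only a warning), register the
 * crypto algorithms, then optionally register uacce and enable SR-IOV.
 * The error labels below unwind in reverse order.
 */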
static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct sec_dev *sec;
        struct hisi_qm *qm;
        int ret;

        sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
        if (!sec)
                return -ENOMEM;

        qm = &sec->qm;
        ret = sec_qm_init(qm, pdev);
        if (ret) {
                pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
                return ret;
        }

        sec->ctx_q_num = ctx_q_num;
        sec_iommu_used_check(sec);

        ret = sec_probe_init(sec);
        if (ret) {
                pci_err(pdev, "Failed to probe!\n");
                goto err_qm_uninit;
        }

        ret = hisi_qm_start(qm);
        if (ret) {
                pci_err(pdev, "Failed to start sec qm!\n");
                goto err_probe_uninit;
        }

        ret = sec_debugfs_init(qm);
        if (ret)
                pci_warn(pdev, "Failed to init debugfs!\n");

        ret = hisi_qm_alg_register(qm, &sec_devices);
        if (ret < 0) {
                pr_err("Failed to register driver to crypto.\n");
                goto err_qm_stop;
        }

        if (qm->uacce) {
                ret = uacce_register(qm->uacce);
                if (ret) {
                        pci_err(pdev, "failed to register uacce (%d)!\n", ret);
                        goto err_alg_unregister;
                }
        }

        if (qm->fun_type == QM_HW_PF && vfs_num) {
                ret = hisi_qm_sriov_enable(pdev, vfs_num);
                if (ret < 0)
                        goto err_alg_unregister;
        }

        return 0;

err_alg_unregister:
        hisi_qm_alg_unregister(qm, &sec_devices);
err_qm_stop:
        sec_debugfs_exit(qm);
        hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
        sec_probe_uninit(qm);
err_qm_uninit:
        sec_qm_uninit(qm);
        return ret;
}

static void sec_remove(struct pci_dev *pdev)
{
        struct hisi_qm *qm = pci_get_drvdata(pdev);

        hisi_qm_wait_task_finish(qm, &sec_devices);
        hisi_qm_alg_unregister(qm, &sec_devices);
        if (qm->fun_type == QM_HW_PF && qm->vfs_num)
                hisi_qm_sriov_disable(pdev, true);

        sec_debugfs_exit(qm);

        (void)hisi_qm_stop(qm, QM_NORMAL);

        if (qm->fun_type == QM_HW_PF)
                sec_debug_regs_clear(qm);

        sec_probe_uninit(qm);

        sec_qm_uninit(qm);
}

static const struct pci_error_handlers sec_err_handler = {
        .error_detected = hisi_qm_dev_err_detected,
        .slot_reset     = hisi_qm_dev_slot_reset,
        .reset_prepare  = hisi_qm_reset_prepare,
        .reset_done     = hisi_qm_reset_done,
};

static struct pci_driver sec_pci_driver = {
        .name = "hisi_sec2",
        .id_table = sec_dev_ids,
        .probe = sec_probe,
        .remove = sec_remove,
        .err_handler = &sec_err_handler,
        .sriov_configure = hisi_qm_sriov_configure,
        .shutdown = hisi_qm_dev_shutdown,
};

static void sec_register_debugfs(void)
{
        if (!debugfs_initialized())
                return;

        sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
        debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
        int ret;

        hisi_qm_init_list(&sec_devices);
        sec_register_debugfs();

        ret = pci_register_driver(&sec_pci_driver);
        if (ret < 0) {
                sec_unregister_debugfs();
                pr_err("Failed to register pci driver.\n");
                return ret;
        }

        return 0;
}

static void __exit sec_exit(void)
{
        pci_unregister_driver(&sec_pci_driver);
        sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");