// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>

#include "sec.h"

#define SEC_VF_NUM                      63
#define SEC_QUEUE_NUM_V1                4096
#define SEC_PF_PCI_DEVICE_ID            0xa255
#define SEC_VF_PCI_DEVICE_ID            0xa256

#define SEC_BD_ERR_CHK_EN0              0xEFFFFFFF
#define SEC_BD_ERR_CHK_EN1              0x7ffff7fd
#define SEC_BD_ERR_CHK_EN3              0xffffbfff

#define SEC_SQE_SIZE                    128
#define SEC_SQ_SIZE                     (SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM                256
#define SEC_PF_DEF_Q_BASE               0
#define SEC_CTX_Q_NUM_DEF               2
#define SEC_CTX_Q_NUM_MAX               32

#define SEC_CTRL_CNT_CLR_CE             0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT         BIT(0)
#define SEC_CORE_INT_SOURCE             0x301010
#define SEC_CORE_INT_MASK               0x301000
#define SEC_CORE_INT_STATUS             0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO      0x301C14
#define SEC_ECC_NUM                     16
#define SEC_ECC_MASK                    0xFF
#define SEC_CORE_INT_DISABLE            0x0
#define SEC_CORE_INT_ENABLE             0x7c1ff
#define SEC_CORE_INT_CLEAR              0x7c1ff
#define SEC_SAA_ENABLE                  0x17f

#define SEC_RAS_CE_REG                  0x301050
#define SEC_RAS_FE_REG                  0x301054
#define SEC_RAS_NFE_REG                 0x301058
#define SEC_RAS_CE_ENB_MSK              0x88
#define SEC_RAS_FE_ENB_MSK              0x0
#define SEC_RAS_NFE_ENB_MSK             0x7c177
#define SEC_OOO_SHUTDOWN_SEL            0x301014
#define SEC_RAS_DISABLE                 0x0
#define SEC_MEM_START_INIT_REG          0x301100
#define SEC_MEM_INIT_DONE_REG           0x301104

/* clock gating */
#define SEC_CONTROL_REG                 0x301200
#define SEC_DYNAMIC_GATE_REG            0x30121c
#define SEC_CORE_AUTO_GATE              0x30212c
#define SEC_DYNAMIC_GATE_EN             0x7bff
#define SEC_CORE_AUTO_GATE_EN           GENMASK(3, 0)
#define SEC_CLK_GATE_ENABLE             BIT(3)
#define SEC_CLK_GATE_DISABLE            (~BIT(3))

#define SEC_TRNG_EN_SHIFT               8
#define SEC_AXI_SHUTDOWN_ENABLE         BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE        0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG    0x301220
#define SEC_INTERFACE_USER_CTRL1_REG    0x301224
#define SEC_SAA_EN_REG                  0x301270
#define SEC_BD_ERR_CHK_EN_REG0          0x301380
#define SEC_BD_ERR_CHK_EN_REG1          0x301384
#define SEC_BD_ERR_CHK_EN_REG3          0x30138c

#define SEC_USER0_SMMU_NORMAL           (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL           (BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_USER1_ENABLE_CONTEXT_SSV    BIT(24)
#define SEC_USER1_ENABLE_DATA_SSV       BIT(16)
#define SEC_USER1_WB_CONTEXT_SSV        BIT(8)
#define SEC_USER1_WB_DATA_SSV           BIT(0)
#define SEC_USER1_SVA_SET               (SEC_USER1_ENABLE_CONTEXT_SSV | \
                                        SEC_USER1_ENABLE_DATA_SSV | \
                                        SEC_USER1_WB_CONTEXT_SSV |  \
                                        SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA              (SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK             (~SEC_USER1_SVA_SET)
#define SEC_CORE_INT_STATUS_M_ECC       BIT(2)

#define SEC_PREFETCH_CFG                0x301130
#define SEC_SVA_TRANS                   0x301EC4
#define SEC_PREFETCH_ENABLE             (~(BIT(0) | BIT(1) | BIT(11)))
#define SEC_PREFETCH_DISABLE            BIT(1)
#define SEC_SVA_DISABLE_READY           (BIT(7) | BIT(11))

#define SEC_DELAY_10_US                 10
#define SEC_POLL_TIMEOUT_US             1000
#define SEC_DBGFS_VAL_MAX_LEN           20
#define SEC_SINGLE_PORT_MAX_TRANS       0x2060

#define SEC_SQE_MASK_OFFSET             64
#define SEC_SQE_MASK_LEN                48
#define SEC_SHAPER_TYPE_RATE            128

struct sec_hw_error {
        u32 int_msk;
        const char *msg;
};

struct sec_dfx_item {
        const char *name;
        u32 offset;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;

static struct hisi_qm_list sec_devices = {
        .register_to_crypto     = sec_register_to_crypto,
        .unregister_from_crypto = sec_unregister_from_crypto,
};

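/* Bit positions match SEC_CORE_INT_STATUS; the table is walked by sec_log_hw_error(). */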
static const struct sec_hw_error sec_hw_errors[] = {
        {
                .int_msk = BIT(0),
                .msg = "sec_axi_rresp_err_rint"
        },
        {
                .int_msk = BIT(1),
                .msg = "sec_axi_bresp_err_rint"
        },
        {
                .int_msk = BIT(2),
                .msg = "sec_ecc_2bit_err_rint"
        },
        {
                .int_msk = BIT(3),
                .msg = "sec_ecc_1bit_err_rint"
        },
        {
                .int_msk = BIT(4),
                .msg = "sec_req_trng_timeout_rint"
        },
        {
                .int_msk = BIT(5),
                .msg = "sec_fsm_hbeat_rint"
        },
        {
                .int_msk = BIT(6),
                .msg = "sec_channel_req_rng_timeout_rint"
        },
        {
                .int_msk = BIT(7),
                .msg = "sec_bd_err_rint"
        },
        {
                .int_msk = BIT(8),
                .msg = "sec_chain_buff_err_rint"
        },
        {
                .int_msk = BIT(14),
                .msg = "sec_no_secure_access"
        },
        {
                .int_msk = BIT(15),
                .msg = "sec_wrapping_key_auth_err"
        },
        {
                .int_msk = BIT(16),
                .msg = "sec_km_key_crc_fail"
        },
        {
                .int_msk = BIT(17),
                .msg = "sec_axi_poison_err"
        },
        {
                .int_msk = BIT(18),
                .msg = "sec_sva_err"
        },
        {}
};

static const char * const sec_dbg_file_name[] = {
        [SEC_CLEAR_ENABLE] = "clear_enable",
};

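/*
 * Each entry maps a debugfs file name to an atomic64_t counter embedded in
 * struct sec_dfx at the given offset; sec_core_debug_init() creates one
 * file per entry under the "sec_dfx" directory.
 */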
static struct sec_dfx_item sec_dfx_labels[] = {
        {"send_cnt", offsetof(struct sec_dfx, send_cnt)},
        {"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
        {"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
        {"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
        {"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
        {"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
        {"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};

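/*
 * DFX registers dumped through the "regs" debugfs file; they are also read
 * back (and thereby cleared) by sec_debug_regs_clear() while rdclr_en is set.
 */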
static const struct debugfs_reg32 sec_dfx_regs[] = {
        {"SEC_PF_ABNORMAL_INT_SOURCE    ",  0x301010},
        {"SEC_SAA_EN                    ",  0x301270},
        {"SEC_BD_LATENCY_MIN            ",  0x301600},
        {"SEC_BD_LATENCY_MAX            ",  0x301608},
        {"SEC_BD_LATENCY_AVG            ",  0x30160C},
        {"SEC_BD_NUM_IN_SAA0            ",  0x301670},
        {"SEC_BD_NUM_IN_SAA1            ",  0x301674},
        {"SEC_BD_NUM_IN_SEC             ",  0x301680},
        {"SEC_ECC_1BIT_CNT              ",  0x301C00},
        {"SEC_ECC_1BIT_INFO             ",  0x301C04},
        {"SEC_ECC_2BIT_CNT              ",  0x301C10},
        {"SEC_ECC_2BIT_INFO             ",  0x301C14},
        {"SEC_BD_SAA0                   ",  0x301C20},
        {"SEC_BD_SAA1                   ",  0x301C24},
        {"SEC_BD_SAA2                   ",  0x301C28},
        {"SEC_BD_SAA3                   ",  0x301C2C},
        {"SEC_BD_SAA4                   ",  0x301C30},
        {"SEC_BD_SAA5                   ",  0x301C34},
        {"SEC_BD_SAA6                   ",  0x301C38},
        {"SEC_BD_SAA7                   ",  0x301C3C},
        {"SEC_BD_SAA8                   ",  0x301C40},
};

static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
        return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
        .set = sec_pf_q_num_set,
        .get = param_get_int,
};

static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF (v1 2-4096, v2 2-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
        u32 ctx_q_num;
        int ret;

        if (!val)
                return -EINVAL;

        ret = kstrtou32(val, 10, &ctx_q_num);
        if (ret)
                return -EINVAL;

        /* must be a non-zero even number no larger than SEC_CTX_Q_NUM_MAX */
        if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
                pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
                return -EINVAL;
        }

        return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
        .set = sec_ctx_q_num_set,
        .get = param_get_int,
};

static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)");

static const struct kernel_param_ops vfs_num_ops = {
        .set = vfs_num_set,
        .get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (1-63), 0 (default)");
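
/*
 * VFs can also be toggled at runtime through the standard PCI sysfs knob
 * (the driver wires .sriov_configure to hisi_qm_sriov_configure below), e.g.
 *   echo 16 > /sys/bus/pci/devices/<pci-bdf>/sriov_numvfs
 * where <pci-bdf> is a placeholder for the PF's PCI address.
 */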

void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
        hisi_qm_free_qps(qps, qp_num);
        kfree(qps);
}

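/* sec_create_qps() - allocate ctx_q_num QPs on the caller's NUMA node; returns NULL on failure */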
struct hisi_qp **sec_create_qps(void)
{
        int node = cpu_to_node(smp_processor_id());
        u32 ctx_num = ctx_q_num;
        struct hisi_qp **qps;
        int ret;

        qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
        if (!qps)
                return NULL;

        ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
        if (!ret)
                return qps;

        kfree(qps);
        return NULL;
}

static const struct kernel_param_ops sec_uacce_mode_ops = {
        .set = uacce_mode_set,
        .get = param_get_int,
};

/*
 * uacce_mode = 0 means sec registers only to crypto,
 * uacce_mode = 1 means sec registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
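
/*
 * Illustrative module load (the parameter values here are made up):
 *   modprobe hisi_sec2 pf_q_num=256 ctx_q_num=4 vfs_num=0 uacce_mode=1
 * loads the driver with 256 PF queues, 4 queues per crypto ctx, SR-IOV
 * disabled, and the uacce interface exposed alongside the crypto API.
 */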

static const struct pci_device_id sec_dev_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
        { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

static void sec_set_endian(struct hisi_qm *qm)
{
        u32 reg;

        reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
        reg &= ~(BIT(1) | BIT(0));

        /* BIT(1) is set for 32-bit kernels */
        if (!IS_ENABLED(CONFIG_64BIT))
                reg |= BIT(1);

        /* BIT(0) is set for big-endian kernels */
        if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
                reg |= BIT(0);

        writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}

static void sec_open_sva_prefetch(struct hisi_qm *qm)
{
        u32 val;
        int ret;

        if (qm->ver < QM_HW_V3)
                return;

        /* Enable prefetch */
        val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
        val &= SEC_PREFETCH_ENABLE;
        writel(val, qm->io_base + SEC_PREFETCH_CFG);

        ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
                                         val, !(val & SEC_PREFETCH_DISABLE),
                                         SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
        if (ret)
                pci_err(qm->pdev, "failed to open sva prefetch\n");
}

static void sec_close_sva_prefetch(struct hisi_qm *qm)
{
        u32 val;
        int ret;

        if (qm->ver < QM_HW_V3)
                return;

        val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
        val |= SEC_PREFETCH_DISABLE;
        writel(val, qm->io_base + SEC_PREFETCH_CFG);

        ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
                                         val, !(val & SEC_SVA_DISABLE_READY),
                                         SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
        if (ret)
                pci_err(qm->pdev, "failed to close sva prefetch\n");
}

static void sec_enable_clock_gate(struct hisi_qm *qm)
{
        u32 val;

        if (qm->ver < QM_HW_V3)
                return;

        val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
        val |= SEC_CLK_GATE_ENABLE;
        writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);

        val = readl(qm->io_base + SEC_DYNAMIC_GATE_REG);
        val |= SEC_DYNAMIC_GATE_EN;
        writel(val, qm->io_base + SEC_DYNAMIC_GATE_REG);

        val = readl(qm->io_base + SEC_CORE_AUTO_GATE);
        val |= SEC_CORE_AUTO_GATE_EN;
        writel(val, qm->io_base + SEC_CORE_AUTO_GATE);
}

static void sec_disable_clock_gate(struct hisi_qm *qm)
{
        u32 val;

        /* Kunpeng920 requires clock gating to be disabled */
        val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
        val &= SEC_CLK_GATE_DISABLE;
        writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
}

static int sec_engine_init(struct hisi_qm *qm)
{
        int ret;
        u32 reg;

        /* disable clock gate control before mem init */
        sec_disable_clock_gate(qm);

        writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);

        ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
                                         reg, reg & 0x1, SEC_DELAY_10_US,
                                         SEC_POLL_TIMEOUT_US);
        if (ret) {
                pci_err(qm->pdev, "failed to init sec mem\n");
                return ret;
        }

        reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
        reg |= (0x1 << SEC_TRNG_EN_SHIFT);
        writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);

        reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
        reg |= SEC_USER0_SMMU_NORMAL;
        writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);

        reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
        reg &= SEC_USER1_SMMU_MASK;
        if (qm->use_sva && qm->ver == QM_HW_V2)
                reg |= SEC_USER1_SMMU_SVA;
        else
                reg |= SEC_USER1_SMMU_NORMAL;
        writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);

        writel(SEC_SINGLE_PORT_MAX_TRANS,
               qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);

        writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);

        /* Enable sm4 extra modes, such as ctr/ecb */
        writel_relaxed(SEC_BD_ERR_CHK_EN0,
                       qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
        /* Enable sm4 xts mode with multiple IVs */
        writel_relaxed(SEC_BD_ERR_CHK_EN1,
                       qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
        writel_relaxed(SEC_BD_ERR_CHK_EN3,
                       qm->io_base + SEC_BD_ERR_CHK_EN_REG3);

        /* config endian */
        sec_set_endian(qm);

        sec_enable_clock_gate(qm);

        return 0;
}

static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
        /* qm user domain */
        writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
        writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
        writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
        writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
        writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);

        /* qm cache */
        writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
        writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);

        /* disable FLR triggered by BME (bus master enable) */
        writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
        writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);

        /* enable sqc, cqc writeback */
        writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
               CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
               FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

        return sec_engine_init(qm);
}

/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
        int i;

        /* clear sec dfx regs */
        writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
        for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
                readl(qm->io_base + sec_dfx_regs[i].offset);

        /* clear rdclr_en */
        writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

        hisi_qm_debug_regs_clear(qm);
}

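/*
 * Control whether the SEC AXI master is shut down when a non-fatal error
 * occurs; on hardware newer than QM_HW_V2 the NFE shutdown mask is also
 * programmed through SEC_OOO_SHUTDOWN_SEL.
 */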
static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
        u32 val1, val2;

        val1 = readl(qm->io_base + SEC_CONTROL_REG);
        if (enable) {
                val1 |= SEC_AXI_SHUTDOWN_ENABLE;
                val2 = SEC_RAS_NFE_ENB_MSK;
        } else {
                val1 &= SEC_AXI_SHUTDOWN_DISABLE;
                val2 = 0x0;
        }

        if (qm->ver > QM_HW_V2)
                writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL);

        writel(val1, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_enable(struct hisi_qm *qm)
{
        if (qm->ver == QM_HW_V1) {
                writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
                pci_info(qm->pdev, "V1 does not support hw error handling\n");
                return;
        }

        /* clear SEC hw error source if any */
        writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);

        /* enable RAS int */
        writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
        writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
        writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);

        /* enable SEC block master OOO when nfe occurs on Kunpeng930 */
        sec_master_ooo_ctrl(qm, true);

        /* enable SEC hw error interrupts */
        writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
}

static void sec_hw_error_disable(struct hisi_qm *qm)
{
        /* disable SEC hw error interrupts */
        writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

        /* disable SEC block master OOO when nfe occurs on Kunpeng930 */
        sec_master_ooo_ctrl(qm, false);

        /* disable RAS int */
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
}

static u32 sec_clear_enable_read(struct hisi_qm *qm)
{
        return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
                        SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct hisi_qm *qm, u32 val)
{
        u32 tmp;

        /* only 0 and 1 are valid */
        if (val != 1 && val)
                return -EINVAL;

        tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
               ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
        writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

        return 0;
}
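
/*
 * The clear_enable knob surfaces through debugfs; assuming debugfs is
 * mounted at the usual location, something like
 *   echo 1 > /sys/kernel/debug/hisi_sec2/<pci-bdf>/clear_enable
 * (with <pci-bdf> standing in for the device's PCI address) sets the
 * read-to-clear bit, and reading the file returns the current setting.
 */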

static ssize_t sec_debug_read(struct file *filp, char __user *buf,
                               size_t count, loff_t *pos)
{
        struct sec_debug_file *file = filp->private_data;
        char tbuf[SEC_DBGFS_VAL_MAX_LEN];
        struct hisi_qm *qm = file->qm;
        u32 val;
        int ret;

        ret = hisi_qm_get_dfx_access(qm);
        if (ret)
                return ret;

        spin_lock_irq(&file->lock);

        switch (file->index) {
        case SEC_CLEAR_ENABLE:
                val = sec_clear_enable_read(qm);
                break;
        default:
                goto err_input;
        }

        spin_unlock_irq(&file->lock);

        hisi_qm_put_dfx_access(qm);
        ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
        spin_unlock_irq(&file->lock);
        hisi_qm_put_dfx_access(qm);
        return -EINVAL;
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
                               size_t count, loff_t *pos)
{
        struct sec_debug_file *file = filp->private_data;
        char tbuf[SEC_DBGFS_VAL_MAX_LEN];
        struct hisi_qm *qm = file->qm;
        unsigned long val;
        int len, ret;

        if (*pos != 0)
                return 0;

        if (count >= SEC_DBGFS_VAL_MAX_LEN)
                return -ENOSPC;

        len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
                                     pos, buf, count);
        if (len < 0)
                return len;

        tbuf[len] = '\0';
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;

        ret = hisi_qm_get_dfx_access(qm);
        if (ret)
                return ret;

        spin_lock_irq(&file->lock);

        switch (file->index) {
        case SEC_CLEAR_ENABLE:
                ret = sec_clear_enable_write(qm, val);
                if (ret)
                        goto err_input;
                break;
        default:
                ret = -EINVAL;
                goto err_input;
        }

        ret = count;

err_input:
        spin_unlock_irq(&file->lock);
        hisi_qm_put_dfx_access(qm);
        return ret;
}

static const struct file_operations sec_dbg_fops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = sec_debug_read,
        .write = sec_debug_write,
};

static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
        *val = atomic64_read((atomic64_t *)data);

        return 0;
}

static int sec_debugfs_atomic64_set(void *data, u64 val)
{
        if (val)
                return -EINVAL;

        atomic64_set((atomic64_t *)data, 0);

        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
                         sec_debugfs_atomic64_set, "%lld\n");
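
/*
 * Reading one of the sec_dfx counter files returns its current value;
 * writing accepts only 0, which resets the counter. Assuming debugfs is
 * mounted at /sys/kernel/debug, an example (with <pci-bdf> as a
 * placeholder) is:
 *   cat /sys/kernel/debug/hisi_sec2/<pci-bdf>/sec_dfx/send_cnt
 *   echo 0 > /sys/kernel/debug/hisi_sec2/<pci-bdf>/sec_dfx/send_cnt
 */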

static int sec_regs_show(struct seq_file *s, void *unused)
{
        hisi_qm_regs_dump(s, s->private);

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(sec_regs);

static int sec_core_debug_init(struct hisi_qm *qm)
{
        struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
        struct device *dev = &qm->pdev->dev;
        struct sec_dfx *dfx = &sec->debug.dfx;
        struct debugfs_regset32 *regset;
        struct dentry *tmp_d;
        int i;

        tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);

        regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
        if (!regset)
                return -ENOMEM;

        regset->regs = sec_dfx_regs;
        regset->nregs = ARRAY_SIZE(sec_dfx_regs);
        regset->base = qm->io_base;
        regset->dev = dev;

        if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
                debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops);

        for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
                atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
                                        sec_dfx_labels[i].offset);
                debugfs_create_file(sec_dfx_labels[i].name, 0644,
                                    tmp_d, data, &sec_atomic64_ops);
        }

        return 0;
}

static int sec_debug_init(struct hisi_qm *qm)
{
        struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
        int i;

        if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
                for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
                        spin_lock_init(&sec->debug.files[i].lock);
                        sec->debug.files[i].index = i;
                        sec->debug.files[i].qm = qm;

                        debugfs_create_file(sec_dbg_file_name[i], 0600,
                                            qm->debug.debug_root,
                                            sec->debug.files + i,
                                            &sec_dbg_fops);
                }
        }

        return sec_core_debug_init(qm);
}

static int sec_debugfs_init(struct hisi_qm *qm)
{
        struct device *dev = &qm->pdev->dev;
        int ret;

        qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
                                                  sec_debugfs_root);
        qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
        qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
        hisi_qm_debug_init(qm);

        ret = sec_debug_init(qm);
        if (ret)
                goto failed_to_create;

        return 0;

failed_to_create:
        /* only remove this device's directory, not the module-wide root */
        debugfs_remove_recursive(qm->debug.debug_root);
        return ret;
}

static void sec_debugfs_exit(struct hisi_qm *qm)
{
        debugfs_remove_recursive(qm->debug.debug_root);
}

static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
        const struct sec_hw_error *errs = sec_hw_errors;
        struct device *dev = &qm->pdev->dev;
        u32 err_val;

        while (errs->msg) {
                if (errs->int_msk & err_sts) {
                        dev_err(dev, "%s [error status=0x%x] found\n",
                                        errs->msg, errs->int_msk);

                        if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
                                err_val = readl(qm->io_base +
                                                SEC_CORE_SRAM_ECC_ERR_INFO);
                                dev_err(dev, "multi ecc sram num=0x%x\n",
                                                ((err_val) >> SEC_ECC_NUM) &
                                                SEC_ECC_MASK);
                        }
                }
                errs++;
        }
}

static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
        return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
        writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}

static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
        u32 val;

        val = readl(qm->io_base + SEC_CONTROL_REG);
        writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
        writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}

static void sec_err_info_init(struct hisi_qm *qm)
{
        struct hisi_qm_err_info *err_info = &qm->err_info;

        err_info->ce = QM_BASE_CE;
        err_info->fe = 0;
        err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
        err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
        err_info->msi_wr_port = BIT(0);
        err_info->acpi_rst = "SRST";
        err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
                        QM_ACC_WB_NOT_READY_TIMEOUT;
}

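/* Device error-handling callbacks invoked by the hisi_qm core. */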
static const struct hisi_qm_err_ini sec_err_ini = {
        .hw_init                = sec_set_user_domain_and_cache,
        .hw_err_enable          = sec_hw_error_enable,
        .hw_err_disable         = sec_hw_error_disable,
        .get_dev_hw_err_status  = sec_get_hw_err_status,
        .clear_dev_hw_err_status = sec_clear_hw_err_status,
        .log_dev_hw_err         = sec_log_hw_error,
        .open_axi_master_ooo    = sec_open_axi_master_ooo,
        .open_sva_prefetch      = sec_open_sva_prefetch,
        .close_sva_prefetch     = sec_close_sva_prefetch,
        .err_info_init          = sec_err_info_init,
};

static int sec_pf_probe_init(struct sec_dev *sec)
{
        struct hisi_qm *qm = &sec->qm;
        int ret;

        qm->err_ini = &sec_err_ini;
        qm->err_ini->err_info_init(qm);

        ret = sec_set_user_domain_and_cache(qm);
        if (ret)
                return ret;

        sec_open_sva_prefetch(qm);
        hisi_qm_dev_err_init(qm);
        sec_debug_regs_clear(qm);

        return 0;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
        int ret;

        qm->pdev = pdev;
        qm->ver = pdev->revision;
        qm->algs = "cipher\ndigest\naead";
        qm->mode = uacce_mode;
        qm->sqe_size = SEC_SQE_SIZE;
        qm->dev_name = sec_name;

        qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
                        QM_HW_PF : QM_HW_VF;
        if (qm->fun_type == QM_HW_PF) {
                qm->qp_base = SEC_PF_DEF_Q_BASE;
                qm->qp_num = pf_q_num;
                qm->debug.curr_qm_qp_num = pf_q_num;
                qm->qm_list = &sec_devices;
        } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
                /*
                 * There is no way to fetch the qm configuration from a VM
                 * on v1 hardware, so force the PF to use SEC_PF_DEF_Q_NUM
                 * and allow only one VF there.
                 * v2 hardware has no such problem.
                 */
                qm->qp_base = SEC_PF_DEF_Q_NUM;
                qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
        }

        /*
         * WQ_HIGHPRI: SEC requests must have low latency,
         * so a high-priority workqueue is needed.
         * WQ_UNBOUND: SEC tasks are likely to be long-running,
         * CPU-intensive workloads.
         */
        qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
                                 WQ_UNBOUND, num_online_cpus(),
                                 pci_name(qm->pdev));
        if (!qm->wq) {
                pci_err(qm->pdev, "failed to alloc workqueue\n");
                return -ENOMEM;
        }

        ret = hisi_qm_init(qm);
        if (ret)
                destroy_workqueue(qm->wq);

        return ret;
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
        hisi_qm_uninit(qm);
}

static int sec_probe_init(struct sec_dev *sec)
{
        u32 type_rate = SEC_SHAPER_TYPE_RATE;
        struct hisi_qm *qm = &sec->qm;
        int ret;

        if (qm->fun_type == QM_HW_PF) {
                ret = sec_pf_probe_init(sec);
                if (ret)
                        return ret;
                /* enable shaper type 0 */
                if (qm->ver >= QM_HW_V3) {
                        type_rate |= QM_SHAPER_ENABLE;
                        qm->type_rate = type_rate;
                }
        }

        return 0;
}

static void sec_probe_uninit(struct hisi_qm *qm)
{
        hisi_qm_dev_err_uninit(qm);

        destroy_workqueue(qm->wq);
}

static void sec_iommu_used_check(struct sec_dev *sec)
{
        struct iommu_domain *domain;
        struct device *dev = &sec->qm.pdev->dev;

        domain = iommu_get_domain_for_dev(dev);

        /* Check if iommu is used */
        sec->iommu_used = false;
        if (domain) {
                if (domain->type & __IOMMU_DOMAIN_PAGING)
                        sec->iommu_used = true;
                dev_info(dev, "SMMU opened, iommu type = %u\n",
                        domain->type);
        }
}

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct sec_dev *sec;
        struct hisi_qm *qm;
        int ret;

        sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
        if (!sec)
                return -ENOMEM;

        qm = &sec->qm;
        ret = sec_qm_init(qm, pdev);
        if (ret) {
                pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
                return ret;
        }

        sec->ctx_q_num = ctx_q_num;
        sec_iommu_used_check(sec);

        ret = sec_probe_init(sec);
        if (ret) {
                pci_err(pdev, "Failed to probe!\n");
                goto err_qm_uninit;
        }

        ret = hisi_qm_start(qm);
        if (ret) {
                pci_err(pdev, "Failed to start sec qm!\n");
                goto err_probe_uninit;
        }

        ret = sec_debugfs_init(qm);
        if (ret)
                pci_warn(pdev, "Failed to init debugfs!\n");

        if (qm->qp_num >= ctx_q_num) {
                ret = hisi_qm_alg_register(qm, &sec_devices);
                if (ret < 0) {
                        pr_err("Failed to register driver to crypto.\n");
                        goto err_qm_stop;
                }
        } else {
                pci_warn(qm->pdev,
                        "Failed to use kernel mode, qp not enough!\n");
        }

        if (qm->uacce) {
                ret = uacce_register(qm->uacce);
                if (ret) {
                        pci_err(pdev, "failed to register uacce (%d)!\n", ret);
                        goto err_alg_unregister;
                }
        }

        if (qm->fun_type == QM_HW_PF && vfs_num) {
                ret = hisi_qm_sriov_enable(pdev, vfs_num);
                if (ret < 0)
                        goto err_alg_unregister;
        }

        hisi_qm_pm_init(qm);

        return 0;

err_alg_unregister:
        if (qm->qp_num >= ctx_q_num)
                hisi_qm_alg_unregister(qm, &sec_devices);
err_qm_stop:
        sec_debugfs_exit(qm);
        hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
        sec_probe_uninit(qm);
err_qm_uninit:
        sec_qm_uninit(qm);
        return ret;
}

static void sec_remove(struct pci_dev *pdev)
{
        struct hisi_qm *qm = pci_get_drvdata(pdev);

        hisi_qm_pm_uninit(qm);
        hisi_qm_wait_task_finish(qm, &sec_devices);
        if (qm->qp_num >= ctx_q_num)
                hisi_qm_alg_unregister(qm, &sec_devices);

        if (qm->fun_type == QM_HW_PF && qm->vfs_num)
                hisi_qm_sriov_disable(pdev, true);

        sec_debugfs_exit(qm);

        (void)hisi_qm_stop(qm, QM_NORMAL);

        if (qm->fun_type == QM_HW_PF)
                sec_debug_regs_clear(qm);

        sec_probe_uninit(qm);

        sec_qm_uninit(qm);
}

static const struct dev_pm_ops sec_pm_ops = {
        SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};


static const struct pci_error_handlers sec_err_handler = {
        .error_detected = hisi_qm_dev_err_detected,
        .slot_reset     = hisi_qm_dev_slot_reset,
        .reset_prepare  = hisi_qm_reset_prepare,
        .reset_done     = hisi_qm_reset_done,
};

static struct pci_driver sec_pci_driver = {
        .name = "hisi_sec2",
        .id_table = sec_dev_ids,
        .probe = sec_probe,
        .remove = sec_remove,
        .err_handler = &sec_err_handler,
        .sriov_configure = hisi_qm_sriov_configure,
        .shutdown = hisi_qm_dev_shutdown,
        .driver.pm = &sec_pm_ops,
};

static void sec_register_debugfs(void)
{
        if (!debugfs_initialized())
                return;

        sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
        debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
        int ret;

        hisi_qm_init_list(&sec_devices);
        sec_register_debugfs();

        ret = pci_register_driver(&sec_pci_driver);
        if (ret < 0) {
                sec_unregister_debugfs();
                pr_err("Failed to register pci driver.\n");
                return ret;
        }

        return 0;
}

static void __exit sec_exit(void)
{
        pci_unregister_driver(&sec_pci_driver);
        sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");