arch/powerpc/kernel/dt_cpu_ftrs.c
/*
 * Copyright 2017, Nicholas Piggin, IBM Corporation
 * Licensed under GPLv2.
 */

#define pr_fmt(fmt) "dt-cpu-ftrs: " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/threads.h>

#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/mmu.h>
#include <asm/oprofile_impl.h>
#include <asm/prom.h>
#include <asm/setup.h>


/* Device-tree visible constants follow */
#define ISA_V2_07B	2070
#define ISA_V3_0B	3000

#define USABLE_PR		(1U << 0)
#define USABLE_OS		(1U << 1)
#define USABLE_HV		(1U << 2)

#define HV_SUPPORT_HFSCR	(1U << 0)
#define OS_SUPPORT_FSCR		(1U << 0)

/* For parsing, we define all bits set as "NONE" case */
#define HV_SUPPORT_NONE		0xffffffffU
#define OS_SUPPORT_NONE		0xffffffffU

struct dt_cpu_feature {
	const char *name;
	uint32_t isa;
	uint32_t usable_privilege;
	uint32_t hv_support;
	uint32_t os_support;
	uint32_t hfscr_bit_nr;
	uint32_t fscr_bit_nr;
	uint32_t hwcap_bit_nr;
	/* fdt parsing */
	unsigned long node;
	int enabled;
	int disabled;
};
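
/*
 * Illustrative sketch only (not from the original source or the firmware
 * binding document): process_cpufeatures_node() below consumes feature nodes
 * shaped roughly like the following DTS fragment. The feature name and the
 * numeric values here are hypothetical; the property names are exactly the
 * ones this file reads.
 *
 *	cpu-features {
 *		compatible = "ibm,powerpc-cpu-features";
 *		isa = <3000>;
 *		display-name = "POWER9";
 *
 *		example-feature {
 *			isa = <3000>;
 *			usable-privilege = <0x7>;	// PR | OS | HV
 *			hv-support = <0x1>;		// HV_SUPPORT_HFSCR
 *			os-support = <0x1>;		// OS_SUPPORT_FSCR
 *			hfscr-bit-nr = <59>;
 *			fscr-bit-nr = <59>;
 *			hwcap-bit-nr = <34>;
 *			dependencies = <&some_other_feature>;
 *		};
 *	};
 */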

#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)

#define COMMON_USER_BASE	(PPC_FEATURE_32 | PPC_FEATURE_64 | \
				 PPC_FEATURE_ARCH_2_06 |\
				 PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER2_BASE	(PPC_FEATURE2_ARCH_2_07 | \
				 PPC_FEATURE2_ISEL)
/*
 * Set up the base CPU
 */

extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);

static int hv_mode;

static struct {
	u64	lpcr;
	u64	lpcr_clear;
	u64	hfscr;
	u64	fscr;
} system_registers;

static void (*init_pmu_registers)(void);

static void __restore_cpu_cpufeatures(void)
{
	u64 lpcr;

	/*
	 * LPCR is restored by the power on engine already. It can be changed
	 * after early init e.g., by radix enable, and we have no unified API
	 * for saving and restoring such SPRs.
	 *
	 * This ->restore hook should really be removed from idle and register
	 * restore moved directly into the idle restore code, because this code
	 * doesn't know how idle is implemented or what it needs restored here.
	 *
	 * The best we can do to accommodate secondary boot and idle restore
	 * for now is "or" LPCR with existing.
	 */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= system_registers.lpcr;
	lpcr &= ~system_registers.lpcr_clear;
	mtspr(SPRN_LPCR, lpcr);
	if (hv_mode) {
		mtspr(SPRN_LPID, 0);
		mtspr(SPRN_HFSCR, system_registers.hfscr);
	}
	mtspr(SPRN_FSCR, system_registers.fscr);

	if (init_pmu_registers)
		init_pmu_registers();
}

static char dt_cpu_name[64];

static struct cpu_spec __initdata base_cpu_spec = {
	.cpu_name		= NULL,
	.cpu_features		= CPU_FTRS_DT_CPU_BASE,
	.cpu_user_features	= COMMON_USER_BASE,
	.cpu_user_features2	= COMMON_USER2_BASE,
	.mmu_features		= 0,
	.icache_bsize		= 32, /* minimum block size, fixed by */
	.dcache_bsize		= 32, /* cache info init.             */
	.num_pmcs		= 0,
	.pmc_type		= PPC_PMC_DEFAULT,
	.oprofile_cpu_type	= NULL,
	.oprofile_type		= PPC_OPROFILE_INVALID,
	.cpu_setup		= NULL,
	.cpu_restore		= __restore_cpu_cpufeatures,
	.machine_check_early	= NULL,
	.platform		= NULL,
};

static void __init cpufeatures_setup_cpu(void)
{
	set_cur_cpu_spec(&base_cpu_spec);

	cur_cpu_spec->pvr_mask = -1;
	cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);

	/* Initialize the base environment -- clear FSCR/HFSCR.  */
	hv_mode = !!(mfmsr() & MSR_HV);
	if (hv_mode) {
		/* CPU_FTR_HVMODE is used early in PACA setup */
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
		mtspr(SPRN_HFSCR, 0);
	}
	mtspr(SPRN_FSCR, 0);

	/*
	 * LPCR does not get cleared, to match behaviour with secondaries
	 * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
	 * could clear LPCR too.
	 */
}

static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
{
	if (f->hv_support == HV_SUPPORT_NONE) {
	} else if (f->hv_support & HV_SUPPORT_HFSCR) {
		u64 hfscr = mfspr(SPRN_HFSCR);
		hfscr |= 1UL << f->hfscr_bit_nr;
		mtspr(SPRN_HFSCR, hfscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	if (f->os_support == OS_SUPPORT_NONE) {
	} else if (f->os_support & OS_SUPPORT_FSCR) {
		u64 fscr = mfspr(SPRN_FSCR);
		fscr |= 1UL << f->fscr_bit_nr;
		mtspr(SPRN_FSCR, fscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}

static int __init feat_enable(struct dt_cpu_feature *f)
{
	if (f->hv_support != HV_SUPPORT_NONE) {
		if (f->hfscr_bit_nr != -1) {
			u64 hfscr = mfspr(SPRN_HFSCR);
			hfscr |= 1UL << f->hfscr_bit_nr;
			mtspr(SPRN_HFSCR, hfscr);
		}
	}

	if (f->os_support != OS_SUPPORT_NONE) {
		if (f->fscr_bit_nr != -1) {
			u64 fscr = mfspr(SPRN_FSCR);
			fscr |= 1UL << f->fscr_bit_nr;
			mtspr(SPRN_FSCR, fscr);
		}
	}

	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}
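
/*
 * Illustrative note (not from the original source): the hwcap-bit-nr handling
 * above packs user-visible bits into two 32-bit words. For example, a
 * hypothetical feature with hwcap-bit-nr = 33 maps to word 1, bit 1, i.e. a
 * bit in cpu_user_features2 (exported to userspace as AT_HWCAP2), while
 * word 0 bits land in cpu_user_features (AT_HWCAP). Bit numbers of 64 and
 * above cannot be advertised and hit the pr_err() path.
 */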

static int __init feat_disable(struct dt_cpu_feature *f)
{
	return 0;
}

static int __init feat_enable_hv(struct dt_cpu_feature *f)
{
	u64 lpcr;

	if (!hv_mode) {
		pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
		return 0;
	}

	mtspr(SPRN_LPID, 0);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &=  ~LPCR_LPES0; /* HV external interrupts */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;

	return 1;
}

static int __init feat_enable_le(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
	return 1;
}

static int __init feat_enable_smt(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
	return 1;
}

static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* Set PECE wakeup modes for ISA 207 */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |=  LPCR_PECE0;
	lpcr |=  LPCR_PECE1;
	lpcr |=  LPCR_PECE2;
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_align_dsisr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features &= ~CPU_FTR_NODSISRALIGN;

	return 1;
}

static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* Set PECE wakeup modes for ISAv3.0B */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |=  LPCR_PECE0;
	lpcr |=  LPCR_PECE1;
	lpcr |=  LPCR_PECE2;
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
	u64 lpcr;

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_ISL;

	/* VRMASD */
	lpcr |= LPCR_VPM0;
	lpcr &= ~LPCR_VPM1;
	lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}

static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
{
	u64 lpcr;

	system_registers.lpcr_clear |= (LPCR_ISL | LPCR_UPRT | LPCR_HR);
	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR);
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}


static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_RADIX_MMU
	cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
#endif
	return 0;
}

static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
	u64 lpcr;

	feat_enable(f);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_DPFD;
	lpcr |=  (4UL << LPCR_DPFD_SH);
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static void hfscr_pmu_enable(void)
{
	u64 hfscr = mfspr(SPRN_HFSCR);
	hfscr |= PPC_BIT(60);
	mtspr(SPRN_HFSCR, hfscr);
}

static void init_pmu_power8(void)
{
	if (hv_mode) {
		mtspr(SPRN_MMCRC, 0);
		mtspr(SPRN_MMCRH, 0);
	}

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
	mtspr(SPRN_MMCRS, 0);
}

static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power8";
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;

	return 1;
}

static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power8();
	init_pmu_registers = init_pmu_power8;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
	if (pvr_version_is(PVR_POWER8E))
		cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;

	cur_cpu_spec->num_pmcs		= 6;
	cur_cpu_spec->pmc_type		= PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type	= "ppc64/power8";

	return 1;
}

static void init_pmu_power9(void)
{
	if (hv_mode)
		mtspr(SPRN_MMCRC, 0);

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
}

static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power9";
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;

	return 1;
}

static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power9();
	init_pmu_registers = init_pmu_power9;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;

	cur_cpu_spec->num_pmcs		= 6;
	cur_cpu_spec->pmc_type		= PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type	= "ppc64/power9";

	return 1;
}

static int __init feat_enable_tm(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	feat_enable(f);
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
	return 1;
#endif
	return 0;
}

static int __init feat_enable_fp(struct dt_cpu_feature *f)
{
	feat_enable(f);
	cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;

	return 1;
}

static int __init feat_enable_vector(struct dt_cpu_feature *f)
{
#ifdef CONFIG_ALTIVEC
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
	cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;

	return 1;
#endif
	return 0;
}

static int __init feat_enable_vsx(struct dt_cpu_feature *f)
{
#ifdef CONFIG_VSX
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;

	return 1;
#endif
	return 0;
}

static int __init feat_enable_purr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;

	return 1;
}

static int __init feat_enable_ebb(struct dt_cpu_feature *f)
{
	/*
	 * PPC_FEATURE2_EBB is enabled in PMU init code because it has
	 * historically been related to the PMU facility. This may have
	 * to be decoupled if EBB becomes more generic. For now, follow
	 * existing convention.
	 */
	f->hwcap_bit_nr = -1;
	feat_enable(f);

	return 1;
}

static int __init feat_enable_dbell(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* P9 has an HFSCR for privileged state */
	feat_enable(f);

	cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;

	lpcr = mfspr(SPRN_LPCR);
	lpcr |=  LPCR_PECEDH; /* hyp doorbell wakeup */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_hvi(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/*
	 * POWER9 XIVE interrupts, including those delivered in OPAL XICS
	 * compatibility mode, are always presented as hypervisor
	 * virtualization interrupts (HVI) rather than EE.
	 *
	 * However LPES0 is not set here, on the chance that an EE does get
	 * delivered to the host somehow: the EE handler would not expect it
	 * to be delivered in LPES0 mode (e.g., using SRR[01]). This could
	 * happen if there is a bug in the interrupt controller code, or the
	 * IC is misconfigured in systemsim.
	 */

	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_HVICE;	/* enable hvi interrupts */
	lpcr |= LPCR_HEIC;	/* disable ee interrupts when MSR_HV */
	lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
{
	cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;

	return 1;
}

struct dt_cpu_feature_match {
	const char *name;
	int (*enable)(struct dt_cpu_feature *f);
	u64 cpu_ftr_bit_mask;
};

static struct dt_cpu_feature_match __initdata
		dt_cpu_feature_match_table[] = {
	{"hypervisor", feat_enable_hv, 0},
	{"big-endian", feat_enable, 0},
	{"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
	{"smt", feat_enable_smt, 0},
	{"interrupt-facilities", feat_enable, 0},
	{"timer-facilities", feat_enable, 0},
	{"timer-facilities-v3", feat_enable, 0},
	{"debug-facilities", feat_enable, 0},
	{"come-from-address-register", feat_enable, CPU_FTR_CFAR},
	{"branch-tracing", feat_enable, 0},
	{"floating-point", feat_enable_fp, 0},
	{"vector", feat_enable_vector, 0},
	{"vector-scalar", feat_enable_vsx, 0},
	{"vector-scalar-v3", feat_enable, 0},
	{"decimal-floating-point", feat_enable, 0},
	{"decimal-integer", feat_enable, 0},
	{"quadword-load-store", feat_enable, 0},
	{"vector-crypto", feat_enable, 0},
	{"mmu-hash", feat_enable_mmu_hash, 0},
	{"mmu-radix", feat_enable_mmu_radix, 0},
	{"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
	{"virtual-page-class-key-protection", feat_enable, 0},
	{"transactional-memory", feat_enable_tm, CPU_FTR_TM},
	{"transactional-memory-v3", feat_enable_tm, 0},
	{"tm-suspend-hypervisor-assist", feat_enable, CPU_FTR_P9_TM_HV_ASSIST},
	{"tm-suspend-xer-so-bug", feat_enable, CPU_FTR_P9_TM_XER_SO_BUG},
	{"idle-nap", feat_enable_idle_nap, 0},
	{"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0},
	{"idle-stop", feat_enable_idle_stop, 0},
	{"machine-check-power8", feat_enable_mce_power8, 0},
	{"performance-monitor-power8", feat_enable_pmu_power8, 0},
	{"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
	{"event-based-branch", feat_enable_ebb, 0},
	{"target-address-register", feat_enable, 0},
	{"branch-history-rolling-buffer", feat_enable, 0},
	{"control-register", feat_enable, CPU_FTR_CTRL},
	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
	{"no-execute", feat_enable, 0},
	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
	{"coprocessor-icswx", feat_enable, 0},
	{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
	{"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
	{"wait", feat_enable, 0},
	{"atomic-memory-operations", feat_enable, 0},
	{"branch-v3", feat_enable, 0},
	{"copy-paste", feat_enable, 0},
	{"decimal-floating-point-v3", feat_enable, 0},
	{"decimal-integer-v3", feat_enable, 0},
	{"fixed-point-v3", feat_enable, 0},
	{"floating-point-v3", feat_enable, 0},
	{"group-start-register", feat_enable, 0},
	{"pc-relative-addressing", feat_enable, 0},
	{"machine-check-power9", feat_enable_mce_power9, 0},
	{"performance-monitor-power9", feat_enable_pmu_power9, 0},
	{"event-based-branch-v3", feat_enable, 0},
	{"random-number-generator", feat_enable, 0},
	{"system-call-vectored", feat_disable, 0},
	{"trace-interrupt-v3", feat_enable, 0},
	{"vector-v3", feat_enable, 0},
	{"vector-binary128", feat_enable, 0},
	{"vector-binary16", feat_enable, 0},
	{"wait-v3", feat_enable, 0},
};

static bool __initdata using_dt_cpu_ftrs;
static bool __initdata enable_unknown = true;

static int __init dt_cpu_ftrs_parse(char *str)
{
	if (!str)
		return 0;

	if (!strcmp(str, "off"))
		using_dt_cpu_ftrs = false;
	else if (!strcmp(str, "known"))
		enable_unknown = false;
	else
		return 1;

	return 0;
}
early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
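
/*
 * Illustrative usage note (not from the original source): the early parameter
 * parsed above accepts two values on the kernel command line:
 *
 *	dt_cpu_ftrs=off		do not use the ibm,powerpc-cpu-features setup
 *				at all (disabled_on_cmdline() below also looks
 *				for this exact string in /chosen/bootargs)
 *	dt_cpu_ftrs=known	only enable features this file knows about,
 *				i.e. skip feat_try_enable_unknown()
 */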

static void __init cpufeatures_setup_start(u32 isa)
{
	pr_info("setup for ISA %d\n", isa);

	if (isa >= 3000) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
		cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
	}
}

static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
{
	const struct dt_cpu_feature_match *m;
	bool known = false;
	int i;

	for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
		m = &dt_cpu_feature_match_table[i];
		if (!strcmp(f->name, m->name)) {
			known = true;
			if (m->enable(f))
				break;

			pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
				f->name);
			return false;
		}
	}

	if (!known && enable_unknown) {
		if (!feat_try_enable_unknown(f)) {
			pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
				f->name);
			return false;
		}
	}

	/* m only refers to a matched table entry when the feature is known */
	if (known && m->cpu_ftr_bit_mask)
		cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;

	if (known)
		pr_debug("enabling: %s\n", f->name);
	else
		pr_debug("enabling: %s (unknown)\n", f->name);

	return true;
}

static __init void cpufeatures_cpu_quirks(void)
{
	int version = mfspr(SPRN_PVR);

	/*
	 * Not all quirks can be derived from the cpufeatures device tree.
	 */
	if ((version & 0xffffff00) == 0x004e0100)
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
	else if ((version & 0xffffefff) == 0x004e0200)
		; /* DD2.0 has no feature flag */
	else if ((version & 0xffffefff) == 0x004e0201)
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
	else if ((version & 0xffffefff) == 0x004e0202) {
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
	} else /* DD2.1 and up have DD2_1 */
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;

	if ((version & 0xffff0000) == 0x004e0000) {
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
	}

	/*
	 * PKEY was not in the initial base or feature node
	 * specification, but it should become optional in the next
	 * cpu feature version sequence.
	 */
	cur_cpu_spec->cpu_features |= CPU_FTR_PKEY;
}

static void __init cpufeatures_setup_finished(void)
{
	cpufeatures_cpu_quirks();

	if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
		pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
	}

	/* Make sure powerpc_base_platform is non-NULL */
	powerpc_base_platform = cur_cpu_spec->platform;

	system_registers.lpcr = mfspr(SPRN_LPCR);
	system_registers.hfscr = mfspr(SPRN_HFSCR);
	system_registers.fscr = mfspr(SPRN_FSCR);

	pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
}

static int __init disabled_on_cmdline(void)
{
	unsigned long root, chosen;
	const char *p;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND)
		return false;

	p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
	if (!p)
		return false;

	if (strstr(p, "dt_cpu_ftrs=off"))
		return true;

	return false;
}

static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
					int depth, void *data)
{
	if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
	    && of_get_flat_dt_prop(node, "isa", NULL))
		return 1;

	return 0;
}

bool __init dt_cpu_ftrs_in_use(void)
{
	return using_dt_cpu_ftrs;
}

bool __init dt_cpu_ftrs_init(void *fdt)
{
	using_dt_cpu_ftrs = false;

	/* Setup and verify the FDT, if it fails we just bail */
	if (!early_init_dt_verify(fdt))
		return false;

	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
		return false;

	if (disabled_on_cmdline())
		return false;

	cpufeatures_setup_cpu();

	using_dt_cpu_ftrs = true;
	return true;
}

static int nr_dt_cpu_features;
static struct dt_cpu_feature *dt_cpu_features;

static int __init process_cpufeatures_node(unsigned long node,
					  const char *uname, int i)
{
	const __be32 *prop;
	struct dt_cpu_feature *f;
	int len;

	f = &dt_cpu_features[i];
	memset(f, 0, sizeof(struct dt_cpu_feature));

	f->node = node;

	f->name = uname;

	prop = of_get_flat_dt_prop(node, "isa", &len);
	if (!prop) {
		pr_warn("%s: missing isa property\n", uname);
		return 0;
	}
	f->isa = be32_to_cpup(prop);

	prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
	if (!prop) {
		pr_warn("%s: missing usable-privilege property\n", uname);
		return 0;
	}
	f->usable_privilege = be32_to_cpup(prop);

	prop = of_get_flat_dt_prop(node, "hv-support", &len);
	if (prop)
		f->hv_support = be32_to_cpup(prop);
	else
		f->hv_support = HV_SUPPORT_NONE;

	prop = of_get_flat_dt_prop(node, "os-support", &len);
	if (prop)
		f->os_support = be32_to_cpup(prop);
	else
		f->os_support = OS_SUPPORT_NONE;

	prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
	if (prop)
		f->hfscr_bit_nr = be32_to_cpup(prop);
	else
		f->hfscr_bit_nr = -1;
	prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
	if (prop)
		f->fscr_bit_nr = be32_to_cpup(prop);
	else
		f->fscr_bit_nr = -1;
	prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
	if (prop)
		f->hwcap_bit_nr = be32_to_cpup(prop);
	else
		f->hwcap_bit_nr = -1;

	if (f->usable_privilege & USABLE_HV) {
		if (!(mfmsr() & MSR_HV)) {
			pr_warn("%s: HV feature passed to guest\n", uname);
			return 0;
		}

		if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
			pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
			return 0;
		}

		if (f->hv_support == HV_SUPPORT_HFSCR) {
			if (f->hfscr_bit_nr == -1) {
				pr_warn("%s: missing hfscr_bit_nr\n", uname);
				return 0;
			}
		}
	} else {
		if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
			pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
			return 0;
		}
	}

	if (f->usable_privilege & USABLE_OS) {
		if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
			pr_warn("%s: unwanted fscr_bit_nr\n", uname);
			return 0;
		}

		if (f->os_support == OS_SUPPORT_FSCR) {
			if (f->fscr_bit_nr == -1) {
				pr_warn("%s: missing fscr_bit_nr\n", uname);
				return 0;
			}
		}
	} else {
		if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
			pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
			return 0;
		}
	}

	if (!(f->usable_privilege & USABLE_PR)) {
		if (f->hwcap_bit_nr != -1) {
			pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
			return 0;
		}
	}

	/* Do all the independent features in the first pass */
	if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
		if (cpufeatures_process_feature(f))
			f->enabled = 1;
		else
			f->disabled = 1;
	}

	return 0;
}

static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
{
	const __be32 *prop;
	int len;
	int nr_deps;
	int i;

	if (f->enabled || f->disabled)
		return;

	prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
	if (!prop) {
		pr_warn("%s: missing dependencies property\n", f->name);
		return;
	}

	nr_deps = len / sizeof(int);

	for (i = 0; i < nr_deps; i++) {
		unsigned long phandle = be32_to_cpu(prop[i]);
		int j;

		for (j = 0; j < nr_dt_cpu_features; j++) {
			struct dt_cpu_feature *d = &dt_cpu_features[j];

			if (of_get_flat_dt_phandle(d->node) == phandle) {
				cpufeatures_deps_enable(d);
				if (d->disabled) {
					f->disabled = 1;
					return;
				}
			}
		}
	}

	if (cpufeatures_process_feature(f))
		f->enabled = 1;
	else
		f->disabled = 1;
}

static int __init scan_cpufeatures_subnodes(unsigned long node,
					  const char *uname,
					  void *data)
{
	int *count = data;

	process_cpufeatures_node(node, uname, *count);

	(*count)++;

	return 0;
}

static int __init count_cpufeatures_subnodes(unsigned long node,
					  const char *uname,
					  void *data)
{
	int *count = data;

	(*count)++;

	return 0;
}

static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
					    *uname, int depth, void *data)
{
	const __be32 *prop;
	int count, i;
	u32 isa;

	/* We are scanning "ibm,powerpc-cpu-features" nodes only */
	if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
		return 0;

	prop = of_get_flat_dt_prop(node, "isa", NULL);
	if (!prop)
		/* We checked before, "can't happen" */
		return 0;

	isa = be32_to_cpup(prop);

	/* Count and allocate space for cpu features */
	of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
						&nr_dt_cpu_features);
	dt_cpu_features = __va(
		memblock_alloc(sizeof(struct dt_cpu_feature)*
				nr_dt_cpu_features, PAGE_SIZE));

	cpufeatures_setup_start(isa);

	/* Scan nodes into dt_cpu_features and enable those without deps */
	count = 0;
	of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);

	/* Recursively enable remaining features with dependencies */
	for (i = 0; i < nr_dt_cpu_features; i++) {
		struct dt_cpu_feature *f = &dt_cpu_features[i];

		cpufeatures_deps_enable(f);
	}

	prop = of_get_flat_dt_prop(node, "display-name", NULL);
	if (prop && strlen((char *)prop) != 0) {
		strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
		cur_cpu_spec->cpu_name = dt_cpu_name;
	}

	cpufeatures_setup_finished();

	memblock_free(__pa(dt_cpu_features),
			sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);

	return 0;
}

void __init dt_cpu_ftrs_scan(void)
{
	if (!using_dt_cpu_ftrs)
		return;

	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
}