linux-2.6-microblaze.git: arch/powerpc/kernel/sysfs.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/device.h>
3 #include <linux/cpu.h>
4 #include <linux/smp.h>
5 #include <linux/percpu.h>
6 #include <linux/init.h>
7 #include <linux/sched.h>
8 #include <linux/export.h>
9 #include <linux/nodemask.h>
10 #include <linux/cpumask.h>
11 #include <linux/notifier.h>
12
13 #include <asm/current.h>
14 #include <asm/processor.h>
15 #include <asm/cputable.h>
16 #include <asm/hvcall.h>
17 #include <asm/prom.h>
18 #include <asm/machdep.h>
19 #include <asm/smp.h>
20 #include <asm/pmc.h>
21 #include <asm/firmware.h>
22 #include <asm/idle.h>
23 #include <asm/svm.h>
24
25 #include "cacheinfo.h"
26 #include "setup.h"
27
28 #ifdef CONFIG_PPC64
29 #include <asm/paca.h>
30 #include <asm/lppaca.h>
31 #endif
32
33 static DEFINE_PER_CPU(struct cpu, cpu_devices);
34
35 /*
36  * SMT snooze delay stuff, 64-bit only for now
37  */
38
39 #ifdef CONFIG_PPC64
40
41 /* Time in microseconds we delay before sleeping in the idle loop */
42 static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
43
44 static ssize_t store_smt_snooze_delay(struct device *dev,
45                                       struct device_attribute *attr,
46                                       const char *buf,
47                                       size_t count)
48 {
49         struct cpu *cpu = container_of(dev, struct cpu, dev);
50         ssize_t ret;
51         long snooze;
52
53         ret = sscanf(buf, "%ld", &snooze);
54         if (ret != 1)
55                 return -EINVAL;
56
57         per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
58         return count;
59 }
60
61 static ssize_t show_smt_snooze_delay(struct device *dev,
62                                      struct device_attribute *attr,
63                                      char *buf)
64 {
65         struct cpu *cpu = container_of(dev, struct cpu, dev);
66
67         return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
68 }
69
70 static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
71                    store_smt_snooze_delay);
72
73 static int __init setup_smt_snooze_delay(char *str)
74 {
75         unsigned int cpu;
76         long snooze;
77
78         if (!cpu_has_feature(CPU_FTR_SMT))
79                 return 1;
80
81         snooze = simple_strtol(str, NULL, 10);
82         for_each_possible_cpu(cpu)
83                 per_cpu(smt_snooze_delay, cpu) = snooze;
84
85         return 1;
86 }
87 __setup("smt-snooze-delay=", setup_smt_snooze_delay);
88
89 #endif /* CONFIG_PPC64 */
90
91 #define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \
92 static void read_##NAME(void *val) \
93 { \
94         *(unsigned long *)val = mfspr(ADDRESS); \
95 } \
96 static void write_##NAME(void *val) \
97 { \
98         EXTRA; \
99         mtspr(ADDRESS, *(unsigned long *)val);  \
100 }
101
102 #define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \
103 static ssize_t show_##NAME(struct device *dev, \
104                         struct device_attribute *attr, \
105                         char *buf) \
106 { \
107         struct cpu *cpu = container_of(dev, struct cpu, dev); \
108         unsigned long val; \
109         smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1);    \
110         return sprintf(buf, "%lx\n", val); \
111 } \
112 static ssize_t __used \
113         store_##NAME(struct device *dev, struct device_attribute *attr, \
114                         const char *buf, size_t count) \
115 { \
116         struct cpu *cpu = container_of(dev, struct cpu, dev); \
117         unsigned long val; \
118         int ret = sscanf(buf, "%lx", &val); \
119         if (ret != 1) \
120                 return -EINVAL; \
121         smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
122         return count; \
123 }
124
125 #define SYSFS_PMCSETUP(NAME, ADDRESS) \
126         __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \
127         __SYSFS_SPRSETUP_SHOW_STORE(NAME)
128 #define SYSFS_SPRSETUP(NAME, ADDRESS) \
129         __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \
130         __SYSFS_SPRSETUP_SHOW_STORE(NAME)
131
132 #define SYSFS_SPRSETUP_SHOW_STORE(NAME) \
133         __SYSFS_SPRSETUP_SHOW_STORE(NAME)
134
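/*
 * For reference (expansion sketch, not compiled as-is): an invocation such
 * as SYSFS_SPRSETUP(purr, SPRN_PURR) further below expands, per the macros
 * above, to roughly:
 *
 *     static void read_purr(void *val)
 *     {
 *             *(unsigned long *)val = mfspr(SPRN_PURR);
 *     }
 *     static void write_purr(void *val)
 *     {
 *             mtspr(SPRN_PURR, *(unsigned long *)val);
 *     }
 *     static ssize_t show_purr(struct device *dev,
 *                              struct device_attribute *attr, char *buf)
 *     {
 *             struct cpu *cpu = container_of(dev, struct cpu, dev);
 *             unsigned long val;
 *
 *             smp_call_function_single(cpu->dev.id, read_purr, &val, 1);
 *             return sprintf(buf, "%lx\n", val);
 *     }
 *
 * plus a matching store_purr(), so every register listed below gets show/
 * store helpers that run the mfspr/mtspr on the target CPU. The PMC variant
 * (SYSFS_PMCSETUP) additionally calls ppc_enable_pmcs() before the mtspr.
 */
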
135 #ifdef CONFIG_PPC64
136
137 /*
138  * This is the system wide DSCR register default value. Any
139  * change to this default value through the sysfs interface
140  * will update the per-CPU DSCR default values across the
141  * system, stored in each CPU's PACA structure.
142  */
143 static unsigned long dscr_default;
144
145 /**
146  * read_dscr() - Fetch the cpu specific DSCR default
147  * @val:        Returned cpu specific DSCR default value
148  *
149  * This function returns the per-CPU DSCR default value
150  * for the calling CPU, as stored in its PACA structure.
151  */
152 static void read_dscr(void *val)
153 {
154         *(unsigned long *)val = get_paca()->dscr_default;
155 }
156
157
158 /**
159  * write_dscr() - Update the cpu specific DSCR default
160  * @val:        New cpu specific DSCR default value to update
161  *
162  * This function updates the per-CPU DSCR default value
163  * for the calling CPU, as stored in its PACA structure.
164  */
165 static void write_dscr(void *val)
166 {
167         get_paca()->dscr_default = *(unsigned long *)val;
168         if (!current->thread.dscr_inherit) {
169                 current->thread.dscr = *(unsigned long *)val;
170                 mtspr(SPRN_DSCR, *(unsigned long *)val);
171         }
172 }
173
174 SYSFS_SPRSETUP_SHOW_STORE(dscr);
175 static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
176
177 static void add_write_permission_dev_attr(struct device_attribute *attr)
178 {
179         attr->attr.mode |= 0200;
180 }
181
182 /**
183  * show_dscr_default() - Fetch the system wide DSCR default
184  * @dev:        Device structure
185  * @attr:       Device attribute structure
186  * @buf:        Interface buffer
187  *
188  * This function returns the system wide DSCR default value.
189  */
190 static ssize_t show_dscr_default(struct device *dev,
191                 struct device_attribute *attr, char *buf)
192 {
193         return sprintf(buf, "%lx\n", dscr_default);
194 }
195
196 /**
197  * store_dscr_default() - Update the system wide DSCR default
198  * @dev:        Device structure
199  * @attr:       Device attribute structure
200  * @buf:        Interface buffer
201  * @count:      Size of the update
202  *
203  * This function updates the system wide DSCR default value.
204  */
205 static ssize_t __used store_dscr_default(struct device *dev,
206                 struct device_attribute *attr, const char *buf,
207                 size_t count)
208 {
209         unsigned long val;
210         int ret = 0;
211
212         ret = sscanf(buf, "%lx", &val);
213         if (ret != 1)
214                 return -EINVAL;
215         dscr_default = val;
216
217         on_each_cpu(write_dscr, &val, 1);
218
219         return count;
220 }
221
222 static DEVICE_ATTR(dscr_default, 0600,
223                 show_dscr_default, store_dscr_default);
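
/*
 * Usage sketch (illustrative, assuming the standard sysfs mount and cpu
 * subsystem path): writing to /sys/devices/system/cpu/dscr_default runs
 * write_dscr() on every CPU via on_each_cpu(), e.g. from userspace:
 *
 *     int fd = open("/sys/devices/system/cpu/dscr_default", O_WRONLY);
 *     if (fd >= 0) {
 *             write(fd, "10", 2);     // parsed as hex by store_dscr_default()
 *             close(fd);
 *     }
 *
 * Each CPU then updates paca->dscr_default, and threads that never set a
 * personal DSCR (dscr_inherit == 0) pick up the new value immediately.
 */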
224
225 static void sysfs_create_dscr_default(void)
226 {
227         if (cpu_has_feature(CPU_FTR_DSCR)) {
228                 int err = 0;
229                 int cpu;
230
231                 dscr_default = spr_default_dscr;
232                 for_each_possible_cpu(cpu)
233                         paca_ptrs[cpu]->dscr_default = dscr_default;
234
235                 err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
236         }
237 }
238 #endif /* CONFIG_PPC64 */
239
240 #ifdef CONFIG_PPC_FSL_BOOK3E
241 #define MAX_BIT                         63
242
243 static u64 pw20_wt;
244 static u64 altivec_idle_wt;
245
246 static unsigned int get_idle_ticks_bit(u64 ns)
247 {
248         u64 cycle;
249
250         if (ns >= 10000)
251                 cycle = div_u64(ns + 500, 1000) * tb_ticks_per_usec;
252         else
253                 cycle = div_u64(ns * tb_ticks_per_usec, 1000);
254
255         if (!cycle)
256                 return 0;
257
258         return ilog2(cycle);
259 }
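
/*
 * Worked example (illustrative, assuming a ~41MHz timebase so that
 * tb_ticks_per_usec == 41): for ns = 100 the function computes
 *
 *     cycle = div_u64(100 * 41, 1000) = 4,    ilog2(4) = 2
 *
 * so get_idle_ticks_bit() returns 2, and set_pw20_wait_entry_bit() below
 * programs PW20_ENT with 63 - 2 = 61, i.e. the wait is timed off timebase
 * bit TB[61], matching the 98~195(ns) row of the table further down.
 */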
260
261 static void do_show_pwrmgtcr0(void *val)
262 {
263         u32 *value = val;
264
265         *value = mfspr(SPRN_PWRMGTCR0);
266 }
267
268 static ssize_t show_pw20_state(struct device *dev,
269                                 struct device_attribute *attr, char *buf)
270 {
271         u32 value;
272         unsigned int cpu = dev->id;
273
274         smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
275
276         value &= PWRMGTCR0_PW20_WAIT;
277
278         return sprintf(buf, "%u\n", value ? 1 : 0);
279 }
280
281 static void do_store_pw20_state(void *val)
282 {
283         u32 *value = val;
284         u32 pw20_state;
285
286         pw20_state = mfspr(SPRN_PWRMGTCR0);
287
288         if (*value)
289                 pw20_state |= PWRMGTCR0_PW20_WAIT;
290         else
291                 pw20_state &= ~PWRMGTCR0_PW20_WAIT;
292
293         mtspr(SPRN_PWRMGTCR0, pw20_state);
294 }
295
296 static ssize_t store_pw20_state(struct device *dev,
297                                 struct device_attribute *attr,
298                                 const char *buf, size_t count)
299 {
300         u32 value;
301         unsigned int cpu = dev->id;
302
303         if (kstrtou32(buf, 0, &value))
304                 return -EINVAL;
305
306         if (value > 1)
307                 return -EINVAL;
308
309         smp_call_function_single(cpu, do_store_pw20_state, &value, 1);
310
311         return count;
312 }
313
314 static ssize_t show_pw20_wait_time(struct device *dev,
315                                 struct device_attribute *attr, char *buf)
316 {
317         u32 value;
318         u64 tb_cycle = 1;
319         u64 time;
320
321         unsigned int cpu = dev->id;
322
323         if (!pw20_wt) {
324                 smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
325                 value = (value & PWRMGTCR0_PW20_ENT) >>
326                                         PWRMGTCR0_PW20_ENT_SHIFT;
327
328                 tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
329                 /* convert timebase cycles to ns */
330                 if (tb_ticks_per_usec > 1000) {
331                         time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
332                 } else {
333                         u32 rem_us;
334
335                         time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
336                                                 &rem_us);
337                         time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
338                 }
339         } else {
340                 time = pw20_wt;
341         }
342
343         return sprintf(buf, "%llu\n", time > 0 ? time : 0);
344 }
345
346 static void set_pw20_wait_entry_bit(void *val)
347 {
348         u32 *value = val;
349         u32 pw20_idle;
350
351         pw20_idle = mfspr(SPRN_PWRMGTCR0);
352
353         /* Set Automatic PW20 Core Idle Count */
354         /* clear count */
355         pw20_idle &= ~PWRMGTCR0_PW20_ENT;
356
357         /* set count */
358         pw20_idle |= ((MAX_BIT - *value) << PWRMGTCR0_PW20_ENT_SHIFT);
359
360         mtspr(SPRN_PWRMGTCR0, pw20_idle);
361 }
362
363 static ssize_t store_pw20_wait_time(struct device *dev,
364                                 struct device_attribute *attr,
365                                 const char *buf, size_t count)
366 {
367         u32 entry_bit;
368         u64 value;
369
370         unsigned int cpu = dev->id;
371
372         if (kstrtou64(buf, 0, &value))
373                 return -EINVAL;
374
375         if (!value)
376                 return -EINVAL;
377
378         entry_bit = get_idle_ticks_bit(value);
379         if (entry_bit > MAX_BIT)
380                 return -EINVAL;
381
382         pw20_wt = value;
383
384         smp_call_function_single(cpu, set_pw20_wait_entry_bit,
385                                 &entry_bit, 1);
386
387         return count;
388 }
389
390 static ssize_t show_altivec_idle(struct device *dev,
391                                 struct device_attribute *attr, char *buf)
392 {
393         u32 value;
394         unsigned int cpu = dev->id;
395
396         smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
397
398         value &= PWRMGTCR0_AV_IDLE_PD_EN;
399
400         return sprintf(buf, "%u\n", value ? 1 : 0);
401 }
402
403 static void do_store_altivec_idle(void *val)
404 {
405         u32 *value = val;
406         u32 altivec_idle;
407
408         altivec_idle = mfspr(SPRN_PWRMGTCR0);
409
410         if (*value)
411                 altivec_idle |= PWRMGTCR0_AV_IDLE_PD_EN;
412         else
413                 altivec_idle &= ~PWRMGTCR0_AV_IDLE_PD_EN;
414
415         mtspr(SPRN_PWRMGTCR0, altivec_idle);
416 }
417
418 static ssize_t store_altivec_idle(struct device *dev,
419                                 struct device_attribute *attr,
420                                 const char *buf, size_t count)
421 {
422         u32 value;
423         unsigned int cpu = dev->id;
424
425         if (kstrtou32(buf, 0, &value))
426                 return -EINVAL;
427
428         if (value > 1)
429                 return -EINVAL;
430
431         smp_call_function_single(cpu, do_store_altivec_idle, &value, 1);
432
433         return count;
434 }
435
436 static ssize_t show_altivec_idle_wait_time(struct device *dev,
437                                 struct device_attribute *attr, char *buf)
438 {
439         u32 value;
440         u64 tb_cycle = 1;
441         u64 time;
442
443         unsigned int cpu = dev->id;
444
445         if (!altivec_idle_wt) {
446                 smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
447                 value = (value & PWRMGTCR0_AV_IDLE_CNT) >>
448                                         PWRMGTCR0_AV_IDLE_CNT_SHIFT;
449
450                 tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
451                 /* convert timebase cycles to ns */
452                 if (tb_ticks_per_usec > 1000) {
453                         time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
454                 } else {
455                         u32 rem_us;
456
457                         time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
458                                                 &rem_us);
459                         time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
460                 }
461         } else {
462                 time = altivec_idle_wt;
463         }
464
465         return sprintf(buf, "%llu\n", time > 0 ? time : 0);
466 }
467
468 static void set_altivec_idle_wait_entry_bit(void *val)
469 {
470         u32 *value = val;
471         u32 altivec_idle;
472
473         altivec_idle = mfspr(SPRN_PWRMGTCR0);
474
475         /* Set Automatic AltiVec Idle Count */
476         /* clear count */
477         altivec_idle &= ~PWRMGTCR0_AV_IDLE_CNT;
478
479         /* set count */
480         altivec_idle |= ((MAX_BIT - *value) << PWRMGTCR0_AV_IDLE_CNT_SHIFT);
481
482         mtspr(SPRN_PWRMGTCR0, altivec_idle);
483 }
484
485 static ssize_t store_altivec_idle_wait_time(struct device *dev,
486                                 struct device_attribute *attr,
487                                 const char *buf, size_t count)
488 {
489         u32 entry_bit;
490         u64 value;
491
492         unsigned int cpu = dev->id;
493
494         if (kstrtou64(buf, 0, &value))
495                 return -EINVAL;
496
497         if (!value)
498                 return -EINVAL;
499
500         entry_bit = get_idle_ticks_bit(value);
501         if (entry_bit > MAX_BIT)
502                 return -EINVAL;
503
504         altivec_idle_wt = value;
505
506         smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit,
507                                 &entry_bit, 1);
508
509         return count;
510 }
511
512 /*
513  * Enable/Disable interface:
514  * 0 = disable, 1 = enable.
515  */
516 static DEVICE_ATTR(pw20_state, 0600, show_pw20_state, store_pw20_state);
517 static DEVICE_ATTR(altivec_idle, 0600, show_altivec_idle, store_altivec_idle);
518
519 /*
520  * Set wait time interface (nanoseconds):
521  * Example: based on a TB frequency of 41MHz.
522  * 1~48(ns): TB[63]
523  * 49~97(ns): TB[62]
524  * 98~195(ns): TB[61]
525  * 196~390(ns): TB[60]
526  * 391~780(ns): TB[59]
527  * 781~1560(ns): TB[58]
528  * ...
529  */
530 static DEVICE_ATTR(pw20_wait_time, 0600,
531                         show_pw20_wait_time,
532                         store_pw20_wait_time);
533 static DEVICE_ATTR(altivec_idle_wait_time, 0600,
534                         show_altivec_idle_wait_time,
535                         store_altivec_idle_wait_time);
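
/*
 * Usage sketch (illustrative, assuming the standard cpu device directory):
 * on an e6500, register_cpu_online() creates these files per CPU, so a wait
 * time of about 1us could be requested from userspace with:
 *
 *     int fd = open("/sys/devices/system/cpu/cpu0/pw20_wait_time", O_WRONLY);
 *     if (fd >= 0) {
 *             write(fd, "1000", 4);   // nanoseconds, parsed by kstrtou64()
 *             close(fd);
 *     }
 *
 * The value is quantised to a timebase bit by get_idle_ticks_bit(); with a
 * 41MHz timebase, 1000ns lands in the 781~1560(ns) bucket above, i.e. TB[58].
 */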
536 #endif
537
538 /*
539  * Enabling PMCs will slow partition context switch times, so we only do
540  * it the first time we write to the PMCs.
541  */
542
543 static DEFINE_PER_CPU(char, pmcs_enabled);
544
545 void ppc_enable_pmcs(void)
546 {
547         ppc_set_pmu_inuse(1);
548
549         /* Only need to enable them once */
550         if (__this_cpu_read(pmcs_enabled))
551                 return;
552
553         __this_cpu_write(pmcs_enabled, 1);
554
555         if (ppc_md.enable_pmcs)
556                 ppc_md.enable_pmcs();
557 }
558 EXPORT_SYMBOL(ppc_enable_pmcs);
559
560
561
562 /* Let's define all possible registers; we'll only hook up the ones
563  * that are implemented on the current processor.
564  */
565
566 #ifdef CONFIG_PMU_SYSFS
567 #if defined(CONFIG_PPC64) || defined(CONFIG_PPC_BOOK3S_32)
568 #define HAS_PPC_PMC_CLASSIC     1
569 #define HAS_PPC_PMC_IBM         1
570 #endif
571
572 #ifdef CONFIG_PPC64
573 #define HAS_PPC_PMC_PA6T        1
574 #define HAS_PPC_PMC56          1
575 #endif
576
577 #ifdef CONFIG_PPC_BOOK3S_32
578 #define HAS_PPC_PMC_G4          1
579 #endif
580 #endif /* CONFIG_PMU_SYSFS */
581
582 #if defined(CONFIG_PPC64) && defined(CONFIG_DEBUG_MISC)
583 #define HAS_PPC_PA6T
584 #endif
585 /*
586  * SPRs which are not related to PMU.
587  */
588 #ifdef CONFIG_PPC64
589 SYSFS_SPRSETUP(purr, SPRN_PURR);
590 SYSFS_SPRSETUP(spurr, SPRN_SPURR);
591 SYSFS_SPRSETUP(pir, SPRN_PIR);
592 SYSFS_SPRSETUP(tscr, SPRN_TSCR);
593
594 /*
595   Let's only enable read for phyp resources and
596   enable write when needed with a separate function.
597   Let's be conservative and default to pseries.
598 */
599 static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
600 static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
601 static DEVICE_ATTR(pir, 0400, show_pir, NULL);
602 static DEVICE_ATTR(tscr, 0600, show_tscr, store_tscr);
603 #endif /* CONFIG_PPC64 */
604
605 #ifdef HAS_PPC_PMC_CLASSIC
606 SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
607 SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
608 SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
609 SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
610 SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
611 SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
612 SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
613 SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
614 #endif
615
616 #ifdef HAS_PPC_PMC_G4
617 SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
618 #endif
619
620 #ifdef HAS_PPC_PMC56
621 SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
622 SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
623
624 SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
625 SYSFS_PMCSETUP(mmcr3, SPRN_MMCR3);
626
627 static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
628 static DEVICE_ATTR(mmcr3, 0600, show_mmcr3, store_mmcr3);
629 #endif /* HAS_PPC_PMC56 */
630
631
632
633
634 #ifdef HAS_PPC_PMC_PA6T
635 SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
636 SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
637 SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
638 SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
639 SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
640 SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
641 #endif
642
643 #ifdef HAS_PPC_PA6T
644 SYSFS_SPRSETUP(hid0, SPRN_HID0);
645 SYSFS_SPRSETUP(hid1, SPRN_HID1);
646 SYSFS_SPRSETUP(hid4, SPRN_HID4);
647 SYSFS_SPRSETUP(hid5, SPRN_HID5);
648 SYSFS_SPRSETUP(ima0, SPRN_PA6T_IMA0);
649 SYSFS_SPRSETUP(ima1, SPRN_PA6T_IMA1);
650 SYSFS_SPRSETUP(ima2, SPRN_PA6T_IMA2);
651 SYSFS_SPRSETUP(ima3, SPRN_PA6T_IMA3);
652 SYSFS_SPRSETUP(ima4, SPRN_PA6T_IMA4);
653 SYSFS_SPRSETUP(ima5, SPRN_PA6T_IMA5);
654 SYSFS_SPRSETUP(ima6, SPRN_PA6T_IMA6);
655 SYSFS_SPRSETUP(ima7, SPRN_PA6T_IMA7);
656 SYSFS_SPRSETUP(ima8, SPRN_PA6T_IMA8);
657 SYSFS_SPRSETUP(ima9, SPRN_PA6T_IMA9);
658 SYSFS_SPRSETUP(imaat, SPRN_PA6T_IMAAT);
659 SYSFS_SPRSETUP(btcr, SPRN_PA6T_BTCR);
660 SYSFS_SPRSETUP(pccr, SPRN_PA6T_PCCR);
661 SYSFS_SPRSETUP(rpccr, SPRN_PA6T_RPCCR);
662 SYSFS_SPRSETUP(der, SPRN_PA6T_DER);
663 SYSFS_SPRSETUP(mer, SPRN_PA6T_MER);
664 SYSFS_SPRSETUP(ber, SPRN_PA6T_BER);
665 SYSFS_SPRSETUP(ier, SPRN_PA6T_IER);
666 SYSFS_SPRSETUP(sier, SPRN_PA6T_SIER);
667 SYSFS_SPRSETUP(siar, SPRN_PA6T_SIAR);
668 SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
669 SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
670 SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
671 SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
672 #endif /* HAS_PPC_PA6T */
673
674 #ifdef HAS_PPC_PMC_IBM
675 static struct device_attribute ibm_common_attrs[] = {
676         __ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
677         __ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
678 };
679 #endif /* HAS_PPC_PMC_IBM */
680
681 #ifdef HAS_PPC_PMC_G4
682 static struct device_attribute g4_common_attrs[] = {
683         __ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
684         __ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
685         __ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
686 };
687 #endif /* HAS_PPC_PMC_G4 */
688
689 #ifdef HAS_PPC_PMC_CLASSIC
690 static struct device_attribute classic_pmc_attrs[] = {
691         __ATTR(pmc1, 0600, show_pmc1, store_pmc1),
692         __ATTR(pmc2, 0600, show_pmc2, store_pmc2),
693         __ATTR(pmc3, 0600, show_pmc3, store_pmc3),
694         __ATTR(pmc4, 0600, show_pmc4, store_pmc4),
695         __ATTR(pmc5, 0600, show_pmc5, store_pmc5),
696         __ATTR(pmc6, 0600, show_pmc6, store_pmc6),
697 #ifdef HAS_PPC_PMC56
698         __ATTR(pmc7, 0600, show_pmc7, store_pmc7),
699         __ATTR(pmc8, 0600, show_pmc8, store_pmc8),
700 #endif
701 };
702 #endif
703
704 #if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
705 static struct device_attribute pa6t_attrs[] = {
706 #ifdef HAS_PPC_PMC_PA6T
707         __ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
708         __ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
709         __ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
710         __ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
711         __ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
712         __ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
713         __ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
714         __ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
715 #endif
716 #ifdef HAS_PPC_PA6T
717         __ATTR(hid0, 0600, show_hid0, store_hid0),
718         __ATTR(hid1, 0600, show_hid1, store_hid1),
719         __ATTR(hid4, 0600, show_hid4, store_hid4),
720         __ATTR(hid5, 0600, show_hid5, store_hid5),
721         __ATTR(ima0, 0600, show_ima0, store_ima0),
722         __ATTR(ima1, 0600, show_ima1, store_ima1),
723         __ATTR(ima2, 0600, show_ima2, store_ima2),
724         __ATTR(ima3, 0600, show_ima3, store_ima3),
725         __ATTR(ima4, 0600, show_ima4, store_ima4),
726         __ATTR(ima5, 0600, show_ima5, store_ima5),
727         __ATTR(ima6, 0600, show_ima6, store_ima6),
728         __ATTR(ima7, 0600, show_ima7, store_ima7),
729         __ATTR(ima8, 0600, show_ima8, store_ima8),
730         __ATTR(ima9, 0600, show_ima9, store_ima9),
731         __ATTR(imaat, 0600, show_imaat, store_imaat),
732         __ATTR(btcr, 0600, show_btcr, store_btcr),
733         __ATTR(pccr, 0600, show_pccr, store_pccr),
734         __ATTR(rpccr, 0600, show_rpccr, store_rpccr),
735         __ATTR(der, 0600, show_der, store_der),
736         __ATTR(mer, 0600, show_mer, store_mer),
737         __ATTR(ber, 0600, show_ber, store_ber),
738         __ATTR(ier, 0600, show_ier, store_ier),
739         __ATTR(sier, 0600, show_sier, store_sier),
740         __ATTR(siar, 0600, show_siar, store_siar),
741         __ATTR(tsr0, 0600, show_tsr0, store_tsr0),
742         __ATTR(tsr1, 0600, show_tsr1, store_tsr1),
743         __ATTR(tsr2, 0600, show_tsr2, store_tsr2),
744         __ATTR(tsr3, 0600, show_tsr3, store_tsr3),
745 #endif /* HAS_PPC_PA6T */
746 };
747 #endif
748
749 #ifdef CONFIG_PPC_SVM
750 static ssize_t show_svm(struct device *dev, struct device_attribute *attr, char *buf)
751 {
752         return sprintf(buf, "%u\n", is_secure_guest());
753 }
754 static DEVICE_ATTR(svm, 0444, show_svm, NULL);
755
756 static void create_svm_file(void)
757 {
758         device_create_file(cpu_subsys.dev_root, &dev_attr_svm);
759 }
760 #else
761 static void create_svm_file(void)
762 {
763 }
764 #endif /* CONFIG_PPC_SVM */
765
766 #ifdef CONFIG_PPC_PSERIES
767 static void read_idle_purr(void *val)
768 {
769         u64 *ret = val;
770
771         *ret = read_this_idle_purr();
772 }
773
774 static ssize_t idle_purr_show(struct device *dev,
775                               struct device_attribute *attr, char *buf)
776 {
777         struct cpu *cpu = container_of(dev, struct cpu, dev);
778         u64 val;
779
780         smp_call_function_single(cpu->dev.id, read_idle_purr, &val, 1);
781         return sprintf(buf, "%llx\n", val);
782 }
783 static DEVICE_ATTR(idle_purr, 0400, idle_purr_show, NULL);
784
785 static void create_idle_purr_file(struct device *s)
786 {
787         if (firmware_has_feature(FW_FEATURE_LPAR))
788                 device_create_file(s, &dev_attr_idle_purr);
789 }
790
791 static void remove_idle_purr_file(struct device *s)
792 {
793         if (firmware_has_feature(FW_FEATURE_LPAR))
794                 device_remove_file(s, &dev_attr_idle_purr);
795 }
796
797 static void read_idle_spurr(void *val)
798 {
799         u64 *ret = val;
800
801         *ret = read_this_idle_spurr();
802 }
803
804 static ssize_t idle_spurr_show(struct device *dev,
805                                struct device_attribute *attr, char *buf)
806 {
807         struct cpu *cpu = container_of(dev, struct cpu, dev);
808         u64 val;
809
810         smp_call_function_single(cpu->dev.id, read_idle_spurr, &val, 1);
811         return sprintf(buf, "%llx\n", val);
812 }
813 static DEVICE_ATTR(idle_spurr, 0400, idle_spurr_show, NULL);
814
815 static void create_idle_spurr_file(struct device *s)
816 {
817         if (firmware_has_feature(FW_FEATURE_LPAR))
818                 device_create_file(s, &dev_attr_idle_spurr);
819 }
820
821 static void remove_idle_spurr_file(struct device *s)
822 {
823         if (firmware_has_feature(FW_FEATURE_LPAR))
824                 device_remove_file(s, &dev_attr_idle_spurr);
825 }
826
827 #else /* CONFIG_PPC_PSERIES */
828 #define create_idle_purr_file(s)
829 #define remove_idle_purr_file(s)
830 #define create_idle_spurr_file(s)
831 #define remove_idle_spurr_file(s)
832 #endif /* CONFIG_PPC_PSERIES */
833
834 static int register_cpu_online(unsigned int cpu)
835 {
836         struct cpu *c = &per_cpu(cpu_devices, cpu);
837         struct device *s = &c->dev;
838         struct device_attribute *attrs, *pmc_attrs;
839         int i, nattrs;
840
841         /* For cpus present at boot, a reference was already grabbed in register_cpu() */
842         if (!s->of_node)
843                 s->of_node = of_get_cpu_node(cpu, NULL);
844
845 #ifdef CONFIG_PPC64
846         if (cpu_has_feature(CPU_FTR_SMT))
847                 device_create_file(s, &dev_attr_smt_snooze_delay);
848 #endif
849
850         /* PMC stuff */
851         switch (cur_cpu_spec->pmc_type) {
852 #ifdef HAS_PPC_PMC_IBM
853         case PPC_PMC_IBM:
854                 attrs = ibm_common_attrs;
855                 nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
856                 pmc_attrs = classic_pmc_attrs;
857                 break;
858 #endif /* HAS_PPC_PMC_IBM */
859 #ifdef HAS_PPC_PMC_G4
860         case PPC_PMC_G4:
861                 attrs = g4_common_attrs;
862                 nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
863                 pmc_attrs = classic_pmc_attrs;
864                 break;
865 #endif /* HAS_PPC_PMC_G4 */
866 #if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
867         case PPC_PMC_PA6T:
868                 /* PA Semi starts counting at PMC0 */
869                 attrs = pa6t_attrs;
870                 nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
871                 pmc_attrs = NULL;
872                 break;
873 #endif
874         default:
875                 attrs = NULL;
876                 nattrs = 0;
877                 pmc_attrs = NULL;
878         }
879
880         for (i = 0; i < nattrs; i++)
881                 device_create_file(s, &attrs[i]);
882
883         if (pmc_attrs)
884                 for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
885                         device_create_file(s, &pmc_attrs[i]);
886
887 #ifdef CONFIG_PPC64
888 #ifdef  CONFIG_PMU_SYSFS
889         if (cpu_has_feature(CPU_FTR_MMCRA))
890                 device_create_file(s, &dev_attr_mmcra);
891
892         if (cpu_has_feature(CPU_FTR_ARCH_31))
893                 device_create_file(s, &dev_attr_mmcr3);
894 #endif /* CONFIG_PMU_SYSFS */
895
896         if (cpu_has_feature(CPU_FTR_PURR)) {
897                 if (!firmware_has_feature(FW_FEATURE_LPAR))
898                         add_write_permission_dev_attr(&dev_attr_purr);
899                 device_create_file(s, &dev_attr_purr);
900                 create_idle_purr_file(s);
901         }
902
903         if (cpu_has_feature(CPU_FTR_SPURR)) {
904                 device_create_file(s, &dev_attr_spurr);
905                 create_idle_spurr_file(s);
906         }
907
908         if (cpu_has_feature(CPU_FTR_DSCR))
909                 device_create_file(s, &dev_attr_dscr);
910
911         if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
912                 device_create_file(s, &dev_attr_pir);
913
914         if (cpu_has_feature(CPU_FTR_ARCH_206) &&
915                 !firmware_has_feature(FW_FEATURE_LPAR))
916                 device_create_file(s, &dev_attr_tscr);
917 #endif /* CONFIG_PPC64 */
918
919 #ifdef CONFIG_PPC_FSL_BOOK3E
920         if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
921                 device_create_file(s, &dev_attr_pw20_state);
922                 device_create_file(s, &dev_attr_pw20_wait_time);
923
924                 device_create_file(s, &dev_attr_altivec_idle);
925                 device_create_file(s, &dev_attr_altivec_idle_wait_time);
926         }
927 #endif
928         cacheinfo_cpu_online(cpu);
929         return 0;
930 }
931
932 #ifdef CONFIG_HOTPLUG_CPU
933 static int unregister_cpu_online(unsigned int cpu)
934 {
935         struct cpu *c = &per_cpu(cpu_devices, cpu);
936         struct device *s = &c->dev;
937         struct device_attribute *attrs, *pmc_attrs;
938         int i, nattrs;
939
940         BUG_ON(!c->hotpluggable);
941
942 #ifdef CONFIG_PPC64
943         if (cpu_has_feature(CPU_FTR_SMT))
944                 device_remove_file(s, &dev_attr_smt_snooze_delay);
945 #endif
946
947         /* PMC stuff */
948         switch (cur_cpu_spec->pmc_type) {
949 #ifdef HAS_PPC_PMC_IBM
950         case PPC_PMC_IBM:
951                 attrs = ibm_common_attrs;
952                 nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
953                 pmc_attrs = classic_pmc_attrs;
954                 break;
955 #endif /* HAS_PPC_PMC_IBM */
956 #ifdef HAS_PPC_PMC_G4
957         case PPC_PMC_G4:
958                 attrs = g4_common_attrs;
959                 nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
960                 pmc_attrs = classic_pmc_attrs;
961                 break;
962 #endif /* HAS_PPC_PMC_G4 */
963 #if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
964         case PPC_PMC_PA6T:
965                 /* PA Semi starts counting at PMC0 */
966                 attrs = pa6t_attrs;
967                 nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
968                 pmc_attrs = NULL;
969                 break;
970 #endif
971         default:
972                 attrs = NULL;
973                 nattrs = 0;
974                 pmc_attrs = NULL;
975         }
976
977         for (i = 0; i < nattrs; i++)
978                 device_remove_file(s, &attrs[i]);
979
980         if (pmc_attrs)
981                 for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
982                         device_remove_file(s, &pmc_attrs[i]);
983
984 #ifdef CONFIG_PPC64
985 #ifdef CONFIG_PMU_SYSFS
986         if (cpu_has_feature(CPU_FTR_MMCRA))
987                 device_remove_file(s, &dev_attr_mmcra);
988
989         if (cpu_has_feature(CPU_FTR_ARCH_31))
990                 device_remove_file(s, &dev_attr_mmcr3);
991 #endif /* CONFIG_PMU_SYSFS */
992
993         if (cpu_has_feature(CPU_FTR_PURR)) {
994                 device_remove_file(s, &dev_attr_purr);
995                 remove_idle_purr_file(s);
996         }
997
998         if (cpu_has_feature(CPU_FTR_SPURR)) {
999                 device_remove_file(s, &dev_attr_spurr);
1000                 remove_idle_spurr_file(s);
1001         }
1002
1003         if (cpu_has_feature(CPU_FTR_DSCR))
1004                 device_remove_file(s, &dev_attr_dscr);
1005
1006         if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
1007                 device_remove_file(s, &dev_attr_pir);
1008
1009         if (cpu_has_feature(CPU_FTR_ARCH_206) &&
1010                 !firmware_has_feature(FW_FEATURE_LPAR))
1011                 device_remove_file(s, &dev_attr_tscr);
1012 #endif /* CONFIG_PPC64 */
1013
1014 #ifdef CONFIG_PPC_FSL_BOOK3E
1015         if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
1016                 device_remove_file(s, &dev_attr_pw20_state);
1017                 device_remove_file(s, &dev_attr_pw20_wait_time);
1018
1019                 device_remove_file(s, &dev_attr_altivec_idle);
1020                 device_remove_file(s, &dev_attr_altivec_idle_wait_time);
1021         }
1022 #endif
1023         cacheinfo_cpu_offline(cpu);
1024         of_node_put(s->of_node);
1025         s->of_node = NULL;
1026         return 0;
1027 }
1028 #else /* !CONFIG_HOTPLUG_CPU */
1029 #define unregister_cpu_online NULL
1030 #endif
1031
1032 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
1033 ssize_t arch_cpu_probe(const char *buf, size_t count)
1034 {
1035         if (ppc_md.cpu_probe)
1036                 return ppc_md.cpu_probe(buf, count);
1037
1038         return -EINVAL;
1039 }
1040
1041 ssize_t arch_cpu_release(const char *buf, size_t count)
1042 {
1043         if (ppc_md.cpu_release)
1044                 return ppc_md.cpu_release(buf, count);
1045
1046         return -EINVAL;
1047 }
1048 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
1049
1050 static DEFINE_MUTEX(cpu_mutex);
1051
1052 int cpu_add_dev_attr(struct device_attribute *attr)
1053 {
1054         int cpu;
1055
1056         mutex_lock(&cpu_mutex);
1057
1058         for_each_possible_cpu(cpu) {
1059                 device_create_file(get_cpu_device(cpu), attr);
1060         }
1061
1062         mutex_unlock(&cpu_mutex);
1063         return 0;
1064 }
1065 EXPORT_SYMBOL_GPL(cpu_add_dev_attr);
1066
1067 int cpu_add_dev_attr_group(struct attribute_group *attrs)
1068 {
1069         int cpu;
1070         struct device *dev;
1071         int ret;
1072
1073         mutex_lock(&cpu_mutex);
1074
1075         for_each_possible_cpu(cpu) {
1076                 dev = get_cpu_device(cpu);
1077                 ret = sysfs_create_group(&dev->kobj, attrs);
1078                 WARN_ON(ret != 0);
1079         }
1080
1081         mutex_unlock(&cpu_mutex);
1082         return 0;
1083 }
1084 EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group);
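
/*
 * Usage sketch (illustrative, hypothetical attribute name): other powerpc
 * code can attach an extra file to every possible CPU through these helpers:
 *
 *     static ssize_t show_foo(struct device *dev,
 *                             struct device_attribute *attr, char *buf)
 *     {
 *             return sprintf(buf, "%d\n", 42);
 *     }
 *     static DEVICE_ATTR(foo, 0444, show_foo, NULL);
 *
 *     ...
 *     cpu_add_dev_attr(&dev_attr_foo);        // one file per cpuN directory
 *
 * cpu_add_dev_attr_group() does the same for a whole attribute_group, and
 * the cpu_remove_* helpers below take the files away again.
 */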
1085
1086
1087 void cpu_remove_dev_attr(struct device_attribute *attr)
1088 {
1089         int cpu;
1090
1091         mutex_lock(&cpu_mutex);
1092
1093         for_each_possible_cpu(cpu) {
1094                 device_remove_file(get_cpu_device(cpu), attr);
1095         }
1096
1097         mutex_unlock(&cpu_mutex);
1098 }
1099 EXPORT_SYMBOL_GPL(cpu_remove_dev_attr);
1100
1101 void cpu_remove_dev_attr_group(struct attribute_group *attrs)
1102 {
1103         int cpu;
1104         struct device *dev;
1105
1106         mutex_lock(&cpu_mutex);
1107
1108         for_each_possible_cpu(cpu) {
1109                 dev = get_cpu_device(cpu);
1110                 sysfs_remove_group(&dev->kobj, attrs);
1111         }
1112
1113         mutex_unlock(&cpu_mutex);
1114 }
1115 EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);
1116
1117
1118 /* NUMA stuff */
1119
1120 #ifdef CONFIG_NUMA
1121 static void register_nodes(void)
1122 {
1123         int i;
1124
1125         for (i = 0; i < MAX_NUMNODES; i++)
1126                 register_one_node(i);
1127 }
1128
1129 int sysfs_add_device_to_node(struct device *dev, int nid)
1130 {
1131         struct node *node = node_devices[nid];
1132         return sysfs_create_link(&node->dev.kobj, &dev->kobj,
1133                         kobject_name(&dev->kobj));
1134 }
1135 EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
1136
1137 void sysfs_remove_device_from_node(struct device *dev, int nid)
1138 {
1139         struct node *node = node_devices[nid];
1140         sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
1141 }
1142 EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
1143
1144 #else
1145 static void register_nodes(void)
1146 {
1147         return;
1148 }
1149
1150 #endif
1151
1152 /* Only valid if CPU is present. */
1153 static ssize_t show_physical_id(struct device *dev,
1154                                 struct device_attribute *attr, char *buf)
1155 {
1156         struct cpu *cpu = container_of(dev, struct cpu, dev);
1157
1158         return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
1159 }
1160 static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);
1161
1162 static int __init topology_init(void)
1163 {
1164         int cpu, r;
1165
1166         register_nodes();
1167
1168         for_each_possible_cpu(cpu) {
1169                 struct cpu *c = &per_cpu(cpu_devices, cpu);
1170
1171                 /*
1172                  * For now, we just see if the system supports making
1173                  * the RTAS calls for CPU hotplug.  But, there may be a
1174                  * more comprehensive way to do this for an individual
1175                  * CPU.  For instance, the boot cpu might never be valid
1176                  * for hotplugging.
1177                  */
1178                 if (ppc_md.cpu_die)
1179                         c->hotpluggable = 1;
1180
1181                 if (cpu_online(cpu) || c->hotpluggable) {
1182                         register_cpu(c, cpu);
1183
1184                         device_create_file(&c->dev, &dev_attr_physical_id);
1185                 }
1186         }
1187         r = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/topology:online",
1188                               register_cpu_online, unregister_cpu_online);
1189         WARN_ON(r < 0);
1190 #ifdef CONFIG_PPC64
1191         sysfs_create_dscr_default();
1192 #endif /* CONFIG_PPC64 */
1193
1194         create_svm_file();
1195
1196         return 0;
1197 }
1198 subsys_initcall(topology_init);