MIPS: Use fallthrough for arch/mips
arch/mips/mm/c-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>
#include <asm/mips-cps.h>

/*
 * Bits describing what cache ops an SMP callback function may perform.
 *
 * R4K_HIT   -  Virtual user or kernel address based cache operations. The
 *              active_mm must be checked before using user addresses, falling
 *              back to kmap.
 * R4K_INDEX -  Index based cache operations.
 */

#define R4K_HIT         BIT(0)
#define R4K_INDEX       BIT(1)

/**
 * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core.
 * @type:       Type of cache operations (R4K_HIT or R4K_INDEX).
 *
 * Decides whether a cache op needs to be performed on every core in the system.
 * This may change depending on the @type of cache operation, as well as the set
 * of online CPUs, so preemption should be disabled by the caller to prevent CPU
 * hotplug from changing the result.
 *
 * Returns:     1 if the cache operation @type should be done on every core in
 *              the system.
 *              0 if the cache operation @type is globalized and only needs to
 *              be performed on a single CPU.
 */
static inline bool r4k_op_needs_ipi(unsigned int type)
{
        /* The MIPS Coherence Manager (CM) globalizes address-based cache ops */
        if (type == R4K_HIT && mips_cm_present())
                return false;

        /*
         * Hardware doesn't globalize the required cache ops, so SMP calls may
         * be needed, but only if there are foreign CPUs (non-siblings with
         * separate caches).
         */
        /* cpu_foreign_map[] undeclared when !CONFIG_SMP */
#ifdef CONFIG_SMP
        return !cpumask_empty(&cpu_foreign_map[0]);
#else
        return false;
#endif
}

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(unsigned int type,
                                   void (*func)(void *info), void *info)
{
        preempt_disable();
        if (r4k_op_needs_ipi(type))
                smp_call_function_many(&cpu_foreign_map[smp_processor_id()],
                                       func, info, 1);
        func(info);
        preempt_enable();
}
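
/*
 * Illustrative sketch (not part of the original file): the calling
 * convention for r4k_on_each_cpu().  The callback always runs on the
 * local CPU and, when r4k_op_needs_ipi() says the op isn't globalized,
 * on one VPE of each foreign core as well.  The function names below
 * are hypothetical.
 */
#if 0
static void example_local_index_op(void *info)
{
        r4k_blast_dcache();     /* index-type op, per-core caches only */
}

static void example_flush_all_dcaches(void)
{
        r4k_on_each_cpu(R4K_INDEX, example_local_index_op, NULL);
}
#endif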

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long vcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
        .bc_enable = (void *)cache_noop,
        .bc_disable = (void *)cache_noop,
        .bc_wback_inv = (void *)cache_noop,
        .bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL                                      \
do {                                                                    \
        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())            \
                *(volatile unsigned long *)CKSEG1;                      \
        if (R4600_V1_HIT_CACHEOP_WAR)                                   \
                __asm__ __volatile__("nop;nop;nop;nop");                \
} while (0)
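
/*
 * Note (added commentary): the 0xfffffff0 mask in cpu_is_r4600_v?_x()
 * drops the low PRId revision nibble, so every R4600 V1.x (resp. V2.x)
 * stepping matches the comparison.
 */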

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
        R4600_HIT_CACHEOP_WAR_IMPL;
        blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
        blast_dcache64_page(addr);
}

static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
{
        blast_dcache128_page(addr);
}

static void r4k_blast_dcache_page_setup(void)
{
        unsigned long  dc_lsize = cpu_dcache_line_size();

        switch (dc_lsize) {
        case 0:
                r4k_blast_dcache_page = (void *)cache_noop;
                break;
        case 16:
                r4k_blast_dcache_page = blast_dcache16_page;
                break;
        case 32:
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
                break;
        case 64:
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
                break;
        case 128:
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
                break;
        default:
                break;
        }
}

#ifndef CONFIG_EVA
#define r4k_blast_dcache_user_page  r4k_blast_dcache_page
#else

static void (*r4k_blast_dcache_user_page)(unsigned long addr);

static void r4k_blast_dcache_user_page_setup(void)
{
        unsigned long  dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_user_page = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_user_page = blast_dcache16_user_page;
        else if (dc_lsize == 32)
                r4k_blast_dcache_user_page = blast_dcache32_user_page;
        else if (dc_lsize == 64)
                r4k_blast_dcache_user_page = blast_dcache64_user_page;
}

#endif

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void r4k_blast_dcache_page_indexed_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_page_indexed = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
        else if (dc_lsize == 64)
                r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
        else if (dc_lsize == 128)
                r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
}

void (* r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);

static void r4k_blast_dcache_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache = blast_dcache16;
        else if (dc_lsize == 32)
                r4k_blast_dcache = blast_dcache32;
        else if (dc_lsize == 64)
                r4k_blast_dcache = blast_dcache64;
        else if (dc_lsize == 128)
                r4k_blast_dcache = blast_dcache128;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
        __asm__ __volatile__( \
                "b\t1f\n\t" \
                ".align\t" #order "\n\t" \
                "1:\n\t" \
                )
#define CACHE32_UNROLL32_ALIGN  JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
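
/*
 * Why these alignments (a sketch of the reasoning, not original text):
 * each TX49 loop iteration below works on a 0x400-byte (1 KB) index
 * "chunk" of the icache -- 32 unrolled cache ops x 32-byte lines =
 * 1024 bytes.  Aligning the loop code to 1 KB (order 10) or 2 KB
 * (order 11) keeps it inside an "even" chunk while it invalidates the
 * "odd" chunks, and vice versa, so the workaround never invalidates
 * the very code it is executing.
 */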

static inline void blast_r4600_v1_icache32(void)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32();
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
        unsigned long start = INDEX_BASE;
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache_unroll(32, kernel_cache, Index_Invalidate_I,
                                     addr | ws, 32);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache_unroll(32, kernel_cache, Index_Invalidate_I,
                                     addr | ws, 32);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32_page_indexed(page);
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
        unsigned long indexmask = current_cpu_data.icache.waysize - 1;
        unsigned long start = INDEX_BASE + (page & indexmask);
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache_unroll(32, kernel_cache, Index_Invalidate_I,
                                     addr | ws, 32);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache_unroll(32, kernel_cache, Index_Invalidate_I,
                                     addr | ws, 32);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static void r4k_blast_icache_page_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page = blast_icache16_page;
        else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2EF)
                r4k_blast_icache_page = loongson2_blast_icache32_page;
        else if (ic_lsize == 32)
                r4k_blast_icache_page = blast_icache32_page;
        else if (ic_lsize == 64)
                r4k_blast_icache_page = blast_icache64_page;
        else if (ic_lsize == 128)
                r4k_blast_icache_page = blast_icache128_page;
}

#ifndef CONFIG_EVA
#define r4k_blast_icache_user_page  r4k_blast_icache_page
#else

static void (*r4k_blast_icache_user_page)(unsigned long addr);

static void r4k_blast_icache_user_page_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_user_page = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_user_page = blast_icache16_user_page;
        else if (ic_lsize == 32)
                r4k_blast_icache_user_page = blast_icache32_user_page;
        else if (ic_lsize == 64)
                r4k_blast_icache_user_page = blast_icache64_user_page;
}

#endif

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void r4k_blast_icache_page_indexed_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page_indexed = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache_page_indexed =
                                blast_icache32_r4600_v1_page_indexed;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache_page_indexed =
                                tx49_blast_icache32_page_indexed;
                else if (current_cpu_type() == CPU_LOONGSON2EF)
                        r4k_blast_icache_page_indexed =
                                loongson2_blast_icache32_page_indexed;
                else
                        r4k_blast_icache_page_indexed =
                                blast_icache32_page_indexed;
        } else if (ic_lsize == 64)
                r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

void (* r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);

static void r4k_blast_icache_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache = blast_icache16;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache = blast_r4600_v1_icache32;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache = tx49_blast_icache32;
                else if (current_cpu_type() == CPU_LOONGSON2EF)
                        r4k_blast_icache = loongson2_blast_icache32;
                else
                        r4k_blast_icache = blast_icache32;
        } else if (ic_lsize == 64)
                r4k_blast_icache = blast_icache64;
        else if (ic_lsize == 128)
                r4k_blast_icache = blast_icache128;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static void r4k_blast_scache_page_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page = blast_scache16_page;
        else if (sc_lsize == 32)
                r4k_blast_scache_page = blast_scache32_page;
        else if (sc_lsize == 64)
                r4k_blast_scache_page = blast_scache64_page;
        else if (sc_lsize == 128)
                r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void r4k_blast_scache_page_indexed_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page_indexed = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
        else if (sc_lsize == 32)
                r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
        else if (sc_lsize == 64)
                r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
        else if (sc_lsize == 128)
                r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void r4k_blast_scache_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache = blast_scache16;
        else if (sc_lsize == 32)
                r4k_blast_scache = blast_scache32;
        else if (sc_lsize == 64)
                r4k_blast_scache = blast_scache64;
        else if (sc_lsize == 128)
                r4k_blast_scache = blast_scache128;
}

static void (*r4k_blast_scache_node)(long node);

static void r4k_blast_scache_node_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (current_cpu_type() != CPU_LOONGSON64)
                r4k_blast_scache_node = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_node = blast_scache16_node;
        else if (sc_lsize == 32)
                r4k_blast_scache_node = blast_scache32_node;
        else if (sc_lsize == 64)
                r4k_blast_scache_node = blast_scache64_node;
        else if (sc_lsize == 128)
                r4k_blast_scache_node = blast_scache128_node;
}

static inline void local_r4k___flush_cache_all(void * args)
{
        switch (current_cpu_type()) {
        case CPU_LOONGSON2EF:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
        case CPU_R16000:
                /*
                 * These caches are inclusive caches, that is, if something
                 * is not cached in the S-cache, we know it also won't be
                 * in one of the primary caches.
                 */
                r4k_blast_scache();
                break;

        case CPU_LOONGSON64:
                /* Use get_ebase_cpunum() for both NUMA=y/n */
                r4k_blast_scache_node(get_ebase_cpunum() >> 2);
                break;

        case CPU_BMIPS5000:
                r4k_blast_scache();
                __sync();
                break;

        default:
                r4k_blast_dcache();
                r4k_blast_icache();
                break;
        }
}

static void r4k___flush_cache_all(void)
{
        r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL);
}

/**
 * has_valid_asid() - Determine if an mm already has an ASID.
 * @mm:         Memory map.
 * @type:       R4K_HIT or R4K_INDEX, type of cache op.
 *
 * Determines whether @mm already has an ASID on any of the CPUs which cache ops
 * of type @type within an r4k_on_each_cpu() call will affect. If
 * r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the
 * scope of the operation is confined to sibling CPUs, otherwise all online CPUs
 * will need to be checked.
 *
 * Must be called in non-preemptive context.
 *
 * Returns:     1 if the CPUs affected by @type cache ops have an ASID for @mm.
 *              0 otherwise.
 */
static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
{
        unsigned int i;
        const cpumask_t *mask = cpu_present_mask;

        if (cpu_has_mmid)
                return cpu_context(0, mm) != 0;

        /* cpu_sibling_map[] undeclared when !CONFIG_SMP */
#ifdef CONFIG_SMP
        /*
         * If r4k_on_each_cpu does SMP calls, it does them to a single VPE in
         * each foreign core, so we only need to worry about siblings.
         * Otherwise we need to worry about all present CPUs.
         */
        if (r4k_op_needs_ipi(type))
                mask = &cpu_sibling_map[smp_processor_id()];
#endif
        for_each_cpu(i, mask)
                if (cpu_context(i, mm))
                        return 1;
        return 0;
}
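
/*
 * Illustrative sketch (not part of the original file): the calling
 * pattern the kerneldoc above requires -- preemption must already be
 * off, e.g. via the surrounding r4k_on_each_cpu() callback or an
 * explicit preempt_disable().  The function name is hypothetical.
 */
#if 0
static void example_flush_if_live(struct mm_struct *mm)
{
        preempt_disable();
        if (has_valid_asid(mm, R4K_INDEX))
                r4k_blast_dcache();
        preempt_enable();
}
#endif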

static void r4k__flush_cache_vmap(void)
{
        r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
        r4k_blast_dcache();
}

/*
 * Note: flush_tlb_range() assumes flush_cache_range() sufficiently flushes
 * whole caches when vma is executable.
 */
static inline void local_r4k_flush_cache_range(void * args)
{
        struct vm_area_struct *vma = args;
        int exec = vma->vm_flags & VM_EXEC;

        if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
                return;

        /*
         * If dcache can alias, we must blast it since mapping is changing.
         * If executable, we must ensure any dirty lines are written back far
         * enough to be visible to icache.
         */
        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
                r4k_blast_dcache();
        /* If executable, blast stale lines from icache */
        if (exec)
                r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
        int exec = vma->vm_flags & VM_EXEC;

        if (cpu_has_dc_aliases || exec)
                r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
        struct mm_struct *mm = args;

        if (!has_valid_asid(mm, R4K_INDEX))
                return;

        /*
         * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
         * only flush the primary caches, but R1x000 behave sanely.
         * R4000SC and R4400SC indexed S-cache ops also invalidate the primary
         * caches, so we can bail out early.
         */
        if (current_cpu_type() == CPU_R4000SC ||
            current_cpu_type() == CPU_R4000MC ||
            current_cpu_type() == CPU_R4400SC ||
            current_cpu_type() == CPU_R4400MC) {
                r4k_blast_scache();
                return;
        }

        r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
        if (!cpu_has_dc_aliases)
                return;

        r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm);
}

struct flush_cache_page_args {
        struct vm_area_struct *vma;
        unsigned long addr;
        unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
        struct flush_cache_page_args *fcp_args = args;
        struct vm_area_struct *vma = fcp_args->vma;
        unsigned long addr = fcp_args->addr;
        struct page *page = pfn_to_page(fcp_args->pfn);
        int exec = vma->vm_flags & VM_EXEC;
        struct mm_struct *mm = vma->vm_mm;
        int map_coherent = 0;
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        void *vaddr;

        /*
         * If the mm owns no valid ASID yet, it cannot possibly have gotten
         * this page into the cache.
         */
        if (!has_valid_asid(mm, R4K_HIT))
                return;

        addr &= PAGE_MASK;
        pgdp = pgd_offset(mm, addr);
        p4dp = p4d_offset(pgdp, addr);
        pudp = pud_offset(p4dp, addr);
        pmdp = pmd_offset(pudp, addr);
        ptep = pte_offset(pmdp, addr);

        /*
         * If the page isn't marked valid, the page cannot possibly be
         * in the cache.
         */
        if (!(pte_present(*ptep)))
                return;

        if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
                vaddr = NULL;
        else {
                /*
                 * Use kmap_coherent or kmap_atomic to do flushes for
                 * another ASID than the current one.
                 */
                map_coherent = (cpu_has_dc_aliases &&
                                page_mapcount(page) &&
                                !Page_dcache_dirty(page));
                if (map_coherent)
                        vaddr = kmap_coherent(page, addr);
                else
                        vaddr = kmap_atomic(page);
                addr = (unsigned long)vaddr;
        }

        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
                vaddr ? r4k_blast_dcache_page(addr) :
                        r4k_blast_dcache_user_page(addr);
                if (exec && !cpu_icache_snoops_remote_store)
                        r4k_blast_scache_page(addr);
        }
        if (exec) {
                if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
                        drop_mmu_context(mm);
                } else
                        vaddr ? r4k_blast_icache_page(addr) :
                                r4k_blast_icache_user_page(addr);
        }

        if (vaddr) {
                if (map_coherent)
                        kunmap_coherent();
                else
                        kunmap_atomic(vaddr);
        }
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
        unsigned long addr, unsigned long pfn)
{
        struct flush_cache_page_args args;

        args.vma = vma;
        args.addr = addr;
        args.pfn = pfn;

        r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_page, &args);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
        r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
        if (in_atomic())
                local_r4k_flush_data_cache_page((void *)addr);
        else
                r4k_on_each_cpu(R4K_HIT, local_r4k_flush_data_cache_page,
                                (void *) addr);
}
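
/*
 * Presumably the in_atomic() special case above exists because
 * r4k_on_each_cpu() may send IPIs via smp_call_function_many(), which
 * must not be called from atomic context; flushing only the local
 * dcache is the best that can be done there.
 */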

struct flush_icache_range_args {
        unsigned long start;
        unsigned long end;
        unsigned int type;
        bool user;
};

static inline void __local_r4k_flush_icache_range(unsigned long start,
                                                  unsigned long end,
                                                  unsigned int type,
                                                  bool user)
{
        if (!cpu_has_ic_fills_f_dc) {
                if (type == R4K_INDEX ||
                    (type & R4K_INDEX && end - start >= dcache_size)) {
                        r4k_blast_dcache();
                } else {
                        R4600_HIT_CACHEOP_WAR_IMPL;
                        if (user)
                                protected_blast_dcache_range(start, end);
                        else
                                blast_dcache_range(start, end);
                }
        }

        if (type == R4K_INDEX ||
            (type & R4K_INDEX && end - start > icache_size))
                r4k_blast_icache();
        else {
                switch (boot_cpu_type()) {
                case CPU_LOONGSON2EF:
                        protected_loongson2_blast_icache_range(start, end);
                        break;

                default:
                        if (user)
                                protected_blast_icache_range(start, end);
                        else
                                blast_icache_range(start, end);
                        break;
                }
        }
}

static inline void local_r4k_flush_icache_range(unsigned long start,
                                                unsigned long end)
{
        __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false);
}

static inline void local_r4k_flush_icache_user_range(unsigned long start,
                                                     unsigned long end)
{
        __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true);
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
        struct flush_icache_range_args *fir_args = args;
        unsigned long start = fir_args->start;
        unsigned long end = fir_args->end;
        unsigned int type = fir_args->type;
        bool user = fir_args->user;

        __local_r4k_flush_icache_range(start, end, type, user);
}

static void __r4k_flush_icache_range(unsigned long start, unsigned long end,
                                     bool user)
{
        struct flush_icache_range_args args;
        unsigned long size, cache_size;

        args.start = start;
        args.end = end;
        args.type = R4K_HIT | R4K_INDEX;
        args.user = user;

        /*
         * Indexed cache ops require an SMP call.
         * Consider if that can or should be avoided.
         */
        preempt_disable();
        if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) {
                /*
                 * If address-based cache ops don't require an SMP call, then
                 * use them exclusively for small flushes.
                 */
                size = end - start;
                cache_size = icache_size;
                if (!cpu_has_ic_fills_f_dc) {
                        size *= 2;
                        cache_size += dcache_size;
                }
                if (size <= cache_size)
                        args.type &= ~R4K_INDEX;
        }
        r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args);
        preempt_enable();
        instruction_hazard();
}
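
/*
 * Worked example of the heuristic above (illustrative numbers): with a
 * 32 KiB icache, a 32 KiB dcache and !cpu_has_ic_fills_f_dc, flushing
 * a 24 KiB range costs 2 * 24 = 48 KiB of hit ops, below the 64 KiB
 * combined cache size, so R4K_INDEX is dropped and no IPIs are sent;
 * a 40 KiB range (80 KiB of hit ops) keeps R4K_INDEX set.
 */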

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
        return __r4k_flush_icache_range(start, end, false);
}

static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
{
        return __r4k_flush_icache_range(start, end, true);
}

#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        if (WARN_ON(size == 0))
                return;

        preempt_disable();
        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size) {
                        if (current_cpu_type() != CPU_LOONGSON64)
                                r4k_blast_scache();
                        else
                                r4k_blast_scache_node(pa_to_nid(addr));
                } else {
                        blast_scache_range(addr, addr + size);
                }
                preempt_enable();
                __sync();
                return;
        }

        /*
         * Either no secondary cache or the available caches don't have the
         * subset property so we have to flush the primary caches
         * explicitly.
         * If we would need IPI to perform an INDEX-type operation, then
         * we have to use the HIT-type alternative as IPI cannot be used
         * here due to interrupts possibly being disabled.
         */
        if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_dcache_range(addr, addr + size);
        }
        preempt_enable();

        bc_wback_inv(addr, size);
        __sync();
}

static void prefetch_cache_inv(unsigned long addr, unsigned long size)
{
        unsigned int linesz = cpu_scache_line_size();
        unsigned long addr0 = addr, addr1;

        addr0 &= ~(linesz - 1);
        addr1 = (addr0 + size - 1) & ~(linesz - 1);

        protected_writeback_scache_line(addr0);
        if (likely(addr1 != addr0))
                protected_writeback_scache_line(addr1);
        else
                return;

        addr0 += linesz;
        if (likely(addr1 != addr0))
                protected_writeback_scache_line(addr0);
        else
                return;

        addr1 -= linesz;
        if (likely(addr1 > addr0))
                protected_writeback_scache_line(addr0);
}
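
/*
 * Presumably prefetch_cache_inv() exists because BMIPS5000 prefetches:
 * it writes back the cache lines at the boundaries of the range before
 * r4k_dma_cache_inv() below invalidates it, so that dirty data sharing
 * an edge line cannot be discarded.
 */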

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        if (WARN_ON(size == 0))
                return;

        preempt_disable();

        if (current_cpu_type() == CPU_BMIPS5000)
                prefetch_cache_inv(addr, size);

        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size) {
                        if (current_cpu_type() != CPU_LOONGSON64)
                                r4k_blast_scache();
                        else
                                r4k_blast_scache_node(pa_to_nid(addr));
                } else {
                        /*
                         * There is no clearly documented alignment requirement
                         * for the cache instruction on MIPS processors, and
                         * some processors, among them the RM5200 and RM7000
                         * QED processors, will throw an address error for
                         * cache hit ops with insufficient alignment.  This is
                         * solved by aligning the address to the cache line
                         * size.
                         */
                        blast_inv_scache_range(addr, addr + size);
                }
                preempt_enable();
                __sync();
                return;
        }

        if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_inv_dcache_range(addr, addr + size);
        }
        preempt_enable();

        bc_inv(addr, size);
        __sync();
}
#endif /* CONFIG_DMA_NONCOHERENT */

static void r4k_flush_icache_all(void)
{
        if (cpu_has_vtag_icache)
                r4k_blast_icache();
}

struct flush_kernel_vmap_range_args {
        unsigned long   vaddr;
        int             size;
};

static inline void local_r4k_flush_kernel_vmap_range_index(void *args)
{
        /*
         * Aliases only affect the primary caches so don't bother with
         * S-caches or T-caches.
         */
        r4k_blast_dcache();
}

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
        struct flush_kernel_vmap_range_args *vmra = args;
        unsigned long vaddr = vmra->vaddr;
        int size = vmra->size;

        /*
         * Aliases only affect the primary caches so don't bother with
         * S-caches or T-caches.
         */
        R4600_HIT_CACHEOP_WAR_IMPL;
        blast_dcache_range(vaddr, vaddr + size);
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
        struct flush_kernel_vmap_range_args args;

        args.vaddr = (unsigned long) vaddr;
        args.size = size;

        if (size >= dcache_size)
                r4k_on_each_cpu(R4K_INDEX,
                                local_r4k_flush_kernel_vmap_range_index, NULL);
        else
                r4k_on_each_cpu(R4K_HIT, local_r4k_flush_kernel_vmap_range,
                                &args);
}

static inline void rm7k_erratum31(void)
{
        const unsigned long ic_lsize = 32;
        unsigned long addr;

        /* RM7000 erratum #31. The icache is screwed at startup. */
        write_c0_taglo(0);
        write_c0_taghi(0);

        for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noreorder\n\t"
                        ".set mips3\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        "cache\t%2, 0(%0)\n\t"
                        "cache\t%2, 0x1000(%0)\n\t"
                        "cache\t%2, 0x2000(%0)\n\t"
                        "cache\t%2, 0x3000(%0)\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        ".set pop\n"
                        :
                        : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill_I));
        }
}

static inline int alias_74k_erratum(struct cpuinfo_mips *c)
{
        unsigned int imp = c->processor_id & PRID_IMP_MASK;
        unsigned int rev = c->processor_id & PRID_REV_MASK;
        int present = 0;

        /*
         * Early versions of the 74K do not update the cache tags on a
         * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
         * aliases.  In this case it is better to treat the cache as always
         * having aliases.  Also disable the synonym tag update feature
         * where available.  In this case no opportunistic tag update will
         * happen where a load causes a virtual address miss but a physical
         * address hit during a D-cache look-up.
         */
        switch (imp) {
        case PRID_IMP_74K:
                if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
                        present = 1;
                if (rev == PRID_REV_ENCODE_332(2, 4, 0))
                        write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
                break;
        case PRID_IMP_1074K:
                if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
                        present = 1;
                        write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
                }
                break;
        default:
                BUG();
        }

        return present;
}

static void b5k_instruction_hazard(void)
{
        __sync();
        __sync();
        __asm__ __volatile__(
        "       nop; nop; nop; nop; nop; nop; nop; nop\n"
        "       nop; nop; nop; nop; nop; nop; nop; nop\n"
        "       nop; nop; nop; nop; nop; nop; nop; nop\n"
        "       nop; nop; nop; nop; nop; nop; nop; nop\n"
        : : : "memory");
}

static char *way_string[] = { NULL, "direct mapped", "2-way",
        "3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
        "9-way", "10-way", "11-way", "12-way",
        "13-way", "14-way", "15-way", "16-way",
};

static void probe_pcache(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
        unsigned int prid = read_c0_prid();
        int has_74k_erratum = 0;
        unsigned long config1;
        unsigned int lsize;

        switch (current_cpu_type()) {
        case CPU_R4600:                 /* QED style two way caches? */
        case CPU_R4700:
        case CPU_R5000:
        case CPU_NEVADA:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = __ffs(dcache_size/2);

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R5500:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
                break;

        case CPU_TX49XX:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;   /* doesn't matter */

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;   /* doesn't matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
        case CPU_R16000:
                icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
                c->icache.linesz = 64;
                c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
                c->dcache.linesz = 32;
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_VR4133:
                write_c0_config(config & ~VR41_CONF_P4K);
                fallthrough;
        case CPU_VR4131:
                /* Workaround for cache instruction bug of VR4131 */
                if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
                    c->processor_id == 0x0c82U) {
                        config |= 0x00400000U;
                        if (c->processor_id == 0x0c80U)
                                config |= VR41_CONF_BP;
                        write_c0_config(config);
                } else
                        c->options |= MIPS_CPU_CACHE_CDEX_P;

                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = __ffs(dcache_size/2);
                break;

        case CPU_VR41XX:
        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4181:
        case CPU_VR4181A:
                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;   /* doesn't matter */

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;   /* doesn't matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_RM7000:
                rm7k_erratum31();

                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit = __ffs(icache_size / c->icache.ways);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_LOONGSON2EF:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                if (prid & 0x3)
                        c->icache.ways = 4;
                else
                        c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                if (prid & 0x3)
                        c->dcache.ways = 4;
                else
                        c->dcache.ways = 2;
                c->dcache.waybit = 0;
                break;

        case CPU_LOONGSON64:
                config1 = read_c0_config1();
                lsize = (config1 >> 19) & 7;
                if (lsize)
                        c->icache.linesz = 2 << lsize;
                else
                        c->icache.linesz = 0;
                c->icache.sets = 64 << ((config1 >> 22) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);
                icache_size = c->icache.sets *
                              c->icache.ways *
                              c->icache.linesz;
                c->icache.waybit = 0;

                lsize = (config1 >> 10) & 7;
                if (lsize)
                        c->dcache.linesz = 2 << lsize;
                else
                        c->dcache.linesz = 0;
                c->dcache.sets = 64 << ((config1 >> 13) & 7);
                c->dcache.ways = 1 + ((config1 >> 7) & 7);
                dcache_size = c->dcache.sets *
                              c->dcache.ways *
                              c->dcache.linesz;
                c->dcache.waybit = 0;
                if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
                                (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
                                (c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
                        c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_CAVIUM_OCTEON3:
                /* For now lie about the number of ways. */
                c->icache.linesz = 128;
                c->icache.sets = 16;
                c->icache.ways = 8;
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

                c->dcache.linesz = 128;
                c->dcache.ways = 8;
                c->dcache.sets = 8;
                dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        default:
                if (!(config & MIPS_CONF_M))
                        panic("Don't know how to probe P-caches on this cpu.");

                /*
                 * So we seem to be a MIPS32 or MIPS64 CPU; let's probe
                 * the I-cache ...
                 */
                config1 = read_c0_config1();

                lsize = (config1 >> 19) & 7;

                /* IL == 7 is reserved */
                if (lsize == 7)
                        panic("Invalid icache line size");

                c->icache.linesz = lsize ? 2 << lsize : 0;

                c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);

                icache_size = c->icache.sets *
                              c->icache.ways *
                              c->icache.linesz;
                c->icache.waybit = __ffs(icache_size/c->icache.ways);

                if (config & MIPS_CONF_VI)
                        c->icache.flags |= MIPS_CACHE_VTAG;

                /*
                 * Now probe the MIPS32 / MIPS64 data cache.
                 */
                c->dcache.flags = 0;

                lsize = (config1 >> 10) & 7;

                /* DL == 7 is reserved */
                if (lsize == 7)
                        panic("Invalid dcache line size");

                c->dcache.linesz = lsize ? 2 << lsize : 0;

                c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
                c->dcache.ways = 1 + ((config1 >> 7) & 7);

                dcache_size = c->dcache.sets *
                              c->dcache.ways *
                              c->dcache.linesz;
                c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

                c->options |= MIPS_CPU_PREFETCH;
                break;
        }
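
        /*
         * Worked example of the config1 decode above (illustrative
         * numbers): IL = 4 gives a 32-byte line (2 << 4); IS = 2 gives
         * 256 sets (32 << ((2 + 1) & 7)); IA = 3 gives 4 ways, for an
         * icache_size of 256 * 4 * 32 = 32 KiB.
         */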

        /*
         * Processor configuration sanity check for the R4000SC erratum
         * #5.  With page sizes larger than 32kB there is no possibility
         * to get a VCE exception anymore so we don't care about this
         * misconfiguration.  The case is rather theoretical anyway;
         * presumably no vendor is shipping its hardware in the "bad"
         * configuration.
         */
        if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
            (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
            !(config & CONF_SC) && c->icache.linesz != 16 &&
            PAGE_SIZE <= 0x8000)
                panic("Improper R4000SC processor configuration detected");

        /* compute a couple of other cache variables */
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;

        c->icache.sets = c->icache.linesz ?
                icache_size / (c->icache.linesz * c->icache.ways) : 0;
        c->dcache.sets = c->dcache.linesz ?
                dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

        /*
         * R1x000 P-caches are odd in a positive way.  They're 32kB 2-way
         * virtually indexed so normally they'd suffer from aliases, but
         * magic in the hardware deals with that for us so we don't need
         * to take care ourselves.
         */
        switch (current_cpu_type()) {
        case CPU_20KC:
        case CPU_25KF:
        case CPU_I6400:
        case CPU_I6500:
        case CPU_SB1:
        case CPU_SB1A:
        case CPU_XLR:
                c->dcache.flags |= MIPS_CACHE_PINDEX;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
        case CPU_R16000:
                break;

        case CPU_74K:
        case CPU_1074K:
                has_74k_erratum = alias_74k_erratum(c);
                fallthrough;
        case CPU_M14KC:
        case CPU_M14KEC:
        case CPU_24K:
        case CPU_34K:
        case CPU_1004K:
        case CPU_INTERAPTIV:
        case CPU_P5600:
        case CPU_PROAPTIV:
        case CPU_M5150:
        case CPU_QEMU_GENERIC:
        case CPU_P6600:
        case CPU_M6250:
                if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
                    (c->icache.waysize > PAGE_SIZE))
                        c->icache.flags |= MIPS_CACHE_ALIASES;
                if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) {
                        /*
                         * Effectively physically indexed dcache,
                         * thus no virtual aliases.
                         */
                        c->dcache.flags |= MIPS_CACHE_PINDEX;
                        break;
                }
                fallthrough;
        default:
                if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
                        c->dcache.flags |= MIPS_CACHE_ALIASES;
        }

        /* Physically indexed caches don't suffer from virtual aliasing */
        if (c->dcache.flags & MIPS_CACHE_PINDEX)
                c->dcache.flags &= ~MIPS_CACHE_ALIASES;

        /*
         * In systems with CM the icache fills from L2 or closer caches, and
         * thus sees remote stores without needing to write them back any
         * further than that.
         */
        if (mips_cm_present())
                c->icache.flags |= MIPS_IC_SNOOPS_REMOTE;

        switch (current_cpu_type()) {
        case CPU_20KC:
1473                 /*
1474                  * Some older 20Kc chips don't have the 'VI' bit in
1475                  * the config register.
1476                  */
1477                 c->icache.flags |= MIPS_CACHE_VTAG;
1478                 break;
1479
1480         case CPU_ALCHEMY:
1481         case CPU_I6400:
1482         case CPU_I6500:
1483                 c->icache.flags |= MIPS_CACHE_IC_F_DC;
1484                 break;
1485
1486         case CPU_BMIPS5000:
1487                 c->icache.flags |= MIPS_CACHE_IC_F_DC;
1488                 /* Cache aliases are handled in hardware; allow HIGHMEM */
1489                 c->dcache.flags &= ~MIPS_CACHE_ALIASES;
1490                 break;
1491
1492         case CPU_LOONGSON2EF:
1493                 /*
1494                  * Loongson-2 has a 4-way icache, but when using indexed
1495                  * cache ops, one op acts on all 4 ways.
1496                  */
1497                 c->icache.ways = 1;
1498         }
1499
1500         pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
1501                 icache_size >> 10,
1502                 c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
1503                 way_string[c->icache.ways], c->icache.linesz);
1504
1505         pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes.\n",
1506                 dcache_size >> 10, way_string[c->dcache.ways],
1507                 (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
1508                 (c->dcache.flags & MIPS_CACHE_ALIASES) ?
1509                         "cache aliases" : "no aliases",
1510                 c->dcache.linesz);
1511 }
1512
1513 static void probe_vcache(void)
1514 {
1515         struct cpuinfo_mips *c = &current_cpu_data;
1516         unsigned int config2, lsize;
1517
1518         if (current_cpu_type() != CPU_LOONGSON64)
1519                 return;
1520
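        /*
         * Loongson-3 describes its victim cache in the standard Config2
         * tertiary cache fields: TL (line size) in bits [23:20], TS
         * (sets per way) in bits [27:24], TA (ways - 1) in bits [19:16].
         */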
1521         config2 = read_c0_config2();
1522         if ((lsize = ((config2 >> 20) & 15)))
1523                 c->vcache.linesz = 2 << lsize;
1524         else
1525                 c->vcache.linesz = 0;
1526
1527         c->vcache.sets = 64 << ((config2 >> 24) & 15);
1528         c->vcache.ways = 1 + ((config2 >> 16) & 15);
1529
1530         vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
1531
1532         c->vcache.waybit = 0;
1533         c->vcache.waysize = vcache_size / c->vcache.ways;
1534
1535         pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
1536                 vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
1537 }
1538
1539 /*
1540  * If you even _breathe_ on this function, look at the gcc output and make sure
1541  * it does not pop things on and off the stack for the cache sizing loop that
1542  * executes in KSEG1 space or else you will crash and burn badly.  You have
1543  * been warned.
1544  */
1545 static int probe_scache(void)
1546 {
1547         unsigned long flags, addr, begin, end, pow2;
1548         unsigned int config = read_c0_config();
1549         struct cpuinfo_mips *c = &current_cpu_data;
1550
1551         if (config & CONF_SC)
1552                 return 0;
1553
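        /*
         * Probe within a 4MB naturally aligned window over the kernel
         * text; 4MB is the largest secondary cache an R4000/R4400 can
         * have, so the wrap-around search below always terminates.
         */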
1554         begin = (unsigned long) &_stext;
1555         begin &= ~((4 * 1024 * 1024) - 1);
1556         end = begin + (4 * 1024 * 1024);
1557
1558         /*
1559          * This is such a bitch, you'd think they would make it easy to do
1560          * this.  Away you daemons of stupidity!
1561          */
1562         local_irq_save(flags);
1563
1564         /* Load one line at each power-of-two offset to give it a valid tag. */
1565         pow2 = (64 * 1024);
1566         for (addr = begin; addr < end; addr = (begin + pow2)) {
1567                 unsigned long *p = (unsigned long *) addr;
1568                 __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
1569                 pow2 <<= 1;
1570         }
1571
1572         /* Load first line with zero (therefore invalid) tag. */
1573         write_c0_taglo(0);
1574         write_c0_taghi(0);
1575         __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
1576         cache_op(Index_Store_Tag_I, begin);
1577         cache_op(Index_Store_Tag_D, begin);
1578         cache_op(Index_Store_Tag_SD, begin);
1579
1580         /* Now search for the point where the cache indices wrap around. */
1581         pow2 = (128 * 1024);
1582         for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
1583                 cache_op(Index_Load_Tag_SD, addr);
1584                 __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
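                /*
                 * A zero tag read back here means this index aliases the
                 * line we tagged at "begin", i.e. the indices wrapped
                 * around and addr - begin is the secondary cache size.
                 */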
1585                 if (!read_c0_taglo())
1586                         break;
1587                 pow2 <<= 1;
1588         }
1589         local_irq_restore(flags);
1590         addr -= begin;
1591
1592         scache_size = addr;
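        /* Config.SB (bits [23:22]) encodes the scache line size as 16 << SB */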
1593         c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
1594         c->scache.ways = 1;
1595         c->scache.waybit = 0;           /* does not matter */
1596
1597         return 1;
1598 }
1599
1600 static void __init loongson2_sc_init(void)
1601 {
1602         struct cpuinfo_mips *c = &current_cpu_data;
1603
1604         scache_size = 512*1024;
1605         c->scache.linesz = 32;
1606         c->scache.ways = 4;
1607         c->scache.waybit = 0;
1608         c->scache.waysize = scache_size / (c->scache.ways);
1609         c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1610         pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1611                scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1612
1613         c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1614 }
1615
1616 static void __init loongson3_sc_init(void)
1617 {
1618         struct cpuinfo_mips *c = &current_cpu_data;
1619         unsigned int config2, lsize;
1620
1621         config2 = read_c0_config2();
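        /*
         * Decode the standard Config2 secondary cache fields: SL (line
         * size) in bits [7:4], SS (sets per way) in bits [11:8], SA
         * (ways - 1) in bits [3:0].  The result describes a single bank.
         */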
1622         lsize = (config2 >> 4) & 15;
1623         if (lsize)
1624                 c->scache.linesz = 2 << lsize;
1625         else
1626                 c->scache.linesz = 0;
1627         c->scache.sets = 64 << ((config2 >> 8) & 15);
1628         c->scache.ways = 1 + (config2 & 15);
1629
1630         scache_size = c->scache.sets *
1631                                   c->scache.ways *
1632                                   c->scache.linesz;
1633
1634         /* Loongson-3 has 4 scache banks, while Loongson-2K has only 2 banks */
1635         if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
1636                 scache_size *= 2;
1637         else
1638                 scache_size *= 4;
1639
1640         c->scache.waybit = 0;
1641         c->scache.waysize = scache_size / c->scache.ways;
1642         pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1643                scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1644         if (scache_size)
1645                 c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1646         return;
1647 }
1648
1649 extern int r5k_sc_init(void);
1650 extern int rm7k_sc_init(void);
1651 extern int mips_sc_init(void);
1652
1653 static void setup_scache(void)
1654 {
1655         struct cpuinfo_mips *c = &current_cpu_data;
1656         unsigned int config = read_c0_config();
1657         int sc_present = 0;
1658
1659         /*
1660          * Do the probing thing on R4000SC and R4400SC processors.  Other
1661          * processors don't have an S-cache that would be relevant to the
1662          * Linux memory management.
1663          */
1664         switch (current_cpu_type()) {
1665         case CPU_R4000SC:
1666         case CPU_R4000MC:
1667         case CPU_R4400SC:
1668         case CPU_R4400MC:
1669                 sc_present = run_uncached(probe_scache);
1670                 if (sc_present)
1671                         c->options |= MIPS_CPU_CACHE_CDEX_S;
1672                 break;
1673
1674         case CPU_R10000:
1675         case CPU_R12000:
1676         case CPU_R14000:
1677         case CPU_R16000:
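                /*
                 * Config.SS (bits [18:16]) gives the scache size as
                 * 512kB << SS; config bit 13 selects 64 or 128 byte lines.
                 */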
1678                 scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
1679                 c->scache.linesz = 64 << ((config >> 13) & 1);
1680                 c->scache.ways = 2;
1681                 c->scache.waybit = 0;
1682                 sc_present = 1;
1683                 break;
1684
1685         case CPU_R5000:
1686         case CPU_NEVADA:
1687 #ifdef CONFIG_R5000_CPU_SCACHE
1688                 r5k_sc_init();
1689 #endif
1690                 return;
1691
1692         case CPU_RM7000:
1693 #ifdef CONFIG_RM7000_CPU_SCACHE
1694                 rm7k_sc_init();
1695 #endif
1696                 return;
1697
1698         case CPU_LOONGSON2EF:
1699                 loongson2_sc_init();
1700                 return;
1701
1702         case CPU_LOONGSON64:
1703                 loongson3_sc_init();
1704                 return;
1705
1706         case CPU_CAVIUM_OCTEON3:
1707         case CPU_XLP:
1708                 /* don't need to worry about L2, fully coherent */
1709                 return;
1710
1711         default:
1712                 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
1713                                     MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
1714                                     MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
1715 #ifdef CONFIG_MIPS_CPU_SCACHE
1716                         if (mips_sc_init()) {
1717                                 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
1718                                 printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
1719                                        scache_size >> 10,
1720                                        way_string[c->scache.ways], c->scache.linesz);
1721                         }
1722 #else
1723                         if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
1724                                 panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
1725 #endif
1726                         return;
1727                 }
1728                 sc_present = 0;
1729         }
1730
1731         if (!sc_present)
1732                 return;
1733
1734         /* compute the way size and number of sets for the scache */
1735         c->scache.waysize = scache_size / c->scache.ways;
1736
1737         c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1738
1739         printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1740                scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1741
1742         c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1743 }
1744
1745 void au1x00_fixup_config_od(void)
1746 {
1747         /*
1748          * c0_config.od (bit 19) was write-only (and read back as 0)
1749          * on early revisions of Alchemy SoCs.  It disables bus
1750          * transaction overlapping and needs to be set to fix various errata.
1751          */
1752         switch (read_c0_prid()) {
1753         case 0x00030100: /* Au1000 DA */
1754         case 0x00030201: /* Au1000 HA */
1755         case 0x00030202: /* Au1000 HB */
1756         case 0x01030200: /* Au1500 AB */
1757         /*
1758          * The Au1100 errata are actually silent about this bit, so we set
1759          * it just in case on those revisions that, according to the (now
1760          * gone) cpu table, require it to be set.
1761          */
1762         case 0x02030200: /* Au1100 AB */
1763         case 0x02030201: /* Au1100 BA */
1764         case 0x02030202: /* Au1100 BC */
1765                 set_c0_config(1 << 19);
1766                 break;
1767         }
1768 }
1769
1770 /* CP0 hazard avoidance. */
1771 #define NXP_BARRIER()                                                   \
1772          __asm__ __volatile__(                                          \
1773         ".set noreorder\n\t"                                            \
1774         "nop; nop; nop; nop; nop; nop;\n\t"                             \
1775         ".set reorder\n\t")
1776
1777 static void nxp_pr4450_fixup_config(void)
1778 {
1779         unsigned long config0;
1780
1781         config0 = read_c0_config();
1782
1783         /* reset all three cache coherency fields to the default cacheability */
1784         config0 &= ~(0x7 | (7 << 25) | (7 << 28));
1785         config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
1786                     ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
1787                     ((_page_cachable_default >> _CACHE_SHIFT) << 28));
1788         write_c0_config(config0);
1789         NXP_BARRIER();
1790 }
1791
1792 static int cca = -1;
1793
1794 static int __init cca_setup(char *str)
1795 {
1796         get_option(&str, &cca);
1797
1798         return 0;
1799 }
1800
1801 early_param("cca", cca_setup);
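/*
 * Example: booting with "cca=3" requests the cacheable, noncoherent
 * (write-back) attribute on most cores; values outside 0-7 make
 * coherency_setup() fall back to the CCA the firmware left in the
 * config register.
 */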
1802
1803 static void coherency_setup(void)
1804 {
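        /*
         * Use a valid "cca=" command line override if given, otherwise
         * keep the cacheability attribute currently in c0_config.
         */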
1805         if (cca < 0 || cca > 7)
1806                 cca = read_c0_config() & CONF_CM_CMASK;
1807         _page_cachable_default = cca << _CACHE_SHIFT;
1808
1809         pr_debug("Using cache attribute %d\n", cca);
1810         change_c0_config(CONF_CM_CMASK, cca);
1811
1812         /*
1813          * c0_status.cu=0 specifies that updates by the sc instruction use
1814          * the coherency mode specified by the TLB; 1 means cachable
1815          * coherent update on write will be used.  Not all processors have
1816          * this bit: some wire it to zero, and others, like Toshiba, had
1817          * the silly idea of putting something else there ...
1818          */
1819         switch (current_cpu_type()) {
1820         case CPU_R4000PC:
1821         case CPU_R4000SC:
1822         case CPU_R4000MC:
1823         case CPU_R4400PC:
1824         case CPU_R4400SC:
1825         case CPU_R4400MC:
1826                 clear_c0_config(CONF_CU);
1827                 break;
1828         /*
1829          * We need to catch the early Alchemy SoCs with the
1830          * write-only c0_config.od bit and set it back to one on:
1831          * Au1000 rev DA, HA, HB; Au1100 AB, BA, BC; Au1500 AB
1832          */
1833         case CPU_ALCHEMY:
1834                 au1x00_fixup_config_od();
1835                 break;
1836
1837         case PRID_IMP_PR4450:
1838                 nxp_pr4450_fixup_config();
1839                 break;
1840         }
1841 }
1842
1843 static void r4k_cache_error_setup(void)
1844 {
1845         extern char __weak except_vec2_generic;
1846         extern char __weak except_vec2_sb1;
1847
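        /*
         * Cache error exceptions are vectored through offset 0x100 of
         * the uncached exception base; install the 0x80 byte handler
         * appropriate for this CPU there.
         */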
1848         switch (current_cpu_type()) {
1849         case CPU_SB1:
1850         case CPU_SB1A:
1851                 set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
1852                 break;
1853
1854         default:
1855                 set_uncached_handler(0x100, &except_vec2_generic, 0x80);
1856                 break;
1857         }
1858 }
1859
1860 void r4k_cache_init(void)
1861 {
1862         extern void build_clear_page(void);
1863         extern void build_copy_page(void);
1864         struct cpuinfo_mips *c = &current_cpu_data;
1865
1866         probe_pcache();
1867         probe_vcache();
1868         setup_scache();
1869
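        /*
         * Install the cache blasting routines specialised for the
         * geometry probed above; each *_setup() helper picks the
         * variant matching this CPU's cache and line sizes.
         */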
1870         r4k_blast_dcache_page_setup();
1871         r4k_blast_dcache_page_indexed_setup();
1872         r4k_blast_dcache_setup();
1873         r4k_blast_icache_page_setup();
1874         r4k_blast_icache_page_indexed_setup();
1875         r4k_blast_icache_setup();
1876         r4k_blast_scache_page_setup();
1877         r4k_blast_scache_page_indexed_setup();
1878         r4k_blast_scache_setup();
1879         r4k_blast_scache_node_setup();
1880 #ifdef CONFIG_EVA
1881         r4k_blast_dcache_user_page_setup();
1882         r4k_blast_icache_user_page_setup();
1883 #endif
1884
1885         /*
1886          * Some MIPS32 and MIPS64 processors have physically indexed caches.
1887          * This code supports virtually indexed processors and will be
1888          * unnecessarily inefficient on physically indexed processors.
1889          */
1890         if (c->dcache.linesz && cpu_has_dc_aliases)
1891                 shm_align_mask = max_t(unsigned long,
1892                                         c->dcache.sets * c->dcache.linesz - 1,
1893                                         PAGE_SIZE - 1);
1894         else
1895                 shm_align_mask = PAGE_SIZE - 1;
1896
1897         __flush_cache_vmap      = r4k__flush_cache_vmap;
1898         __flush_cache_vunmap    = r4k__flush_cache_vunmap;
1899
1900         flush_cache_all         = cache_noop;
1901         __flush_cache_all       = r4k___flush_cache_all;
1902         flush_cache_mm          = r4k_flush_cache_mm;
1903         flush_cache_page        = r4k_flush_cache_page;
1904         flush_cache_range       = r4k_flush_cache_range;
1905
1906         __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
1907
1908         flush_icache_all        = r4k_flush_icache_all;
1909         local_flush_data_cache_page     = local_r4k_flush_data_cache_page;
1910         flush_data_cache_page   = r4k_flush_data_cache_page;
1911         flush_icache_range      = r4k_flush_icache_range;
1912         local_flush_icache_range        = local_r4k_flush_icache_range;
1913         __flush_icache_user_range       = r4k_flush_icache_user_range;
1914         __local_flush_icache_user_range = local_r4k_flush_icache_user_range;
1915
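        /*
         * With hardware-coherent I/O (or an explicit "coherentio"
         * override) the DMA cache maintenance hooks may be no-ops;
         * otherwise install the real writeback/invalidate routines.
         */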
1916 #ifdef CONFIG_DMA_NONCOHERENT
1917 #ifdef CONFIG_DMA_MAYBE_COHERENT
1918         if (coherentio == IO_COHERENCE_ENABLED ||
1919             (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) {
1920                 _dma_cache_wback_inv    = (void *)cache_noop;
1921                 _dma_cache_wback        = (void *)cache_noop;
1922                 _dma_cache_inv          = (void *)cache_noop;
1923         } else
1924 #endif /* CONFIG_DMA_MAYBE_COHERENT */
1925         {
1926                 _dma_cache_wback_inv    = r4k_dma_cache_wback_inv;
1927                 _dma_cache_wback        = r4k_dma_cache_wback_inv;
1928                 _dma_cache_inv          = r4k_dma_cache_inv;
1929         }
1930 #endif /* CONFIG_DMA_NONCOHERENT */
1931
1932         build_clear_page();
1933         build_copy_page();
1934
1935         /*
1936          * We want to run CMP kernels on cores with and without coherent
1937          * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
1938          * or not to flush caches.
1939          */
1940         local_r4k___flush_cache_all(NULL);
1941
1942         coherency_setup();
1943         board_cache_error_setup = r4k_cache_error_setup;
1944
1945         /*
1946          * Per-CPU overrides
1947          */
1948         switch (current_cpu_type()) {
1949         case CPU_BMIPS4350:
1950         case CPU_BMIPS4380:
1951                 /* No IPI is needed because all CPUs share the same D$ */
1952                 flush_data_cache_page = r4k_blast_dcache_page;
1953                 break;
1954         case CPU_BMIPS5000:
1955                 /* We lose our superpowers if L2 is disabled */
1956                 if (c->scache.flags & MIPS_CACHE_NOT_PRESENT)
1957                         break;
1958
1959                 /* I$ fills from D$ just by emptying the write buffers */
1960                 flush_cache_page = (void *)b5k_instruction_hazard;
1961                 flush_cache_range = (void *)b5k_instruction_hazard;
1962                 local_flush_data_cache_page = (void *)b5k_instruction_hazard;
1963                 flush_data_cache_page = (void *)b5k_instruction_hazard;
1964                 flush_icache_range = (void *)b5k_instruction_hazard;
1965                 local_flush_icache_range = (void *)b5k_instruction_hazard;
1966
1967
1968                 /* Optimization: an L2 flush implicitly flushes the L1 */
1969                 current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
1970                 break;
1971         case CPU_LOONGSON64:
1972                 /* Loongson-3 maintains cache coherency by hardware */
1973                 __flush_cache_all       = cache_noop;
1974                 __flush_cache_vmap      = cache_noop;
1975                 __flush_cache_vunmap    = cache_noop;
1976                 __flush_kernel_vmap_range = (void *)cache_noop;
1977                 flush_cache_mm          = (void *)cache_noop;
1978                 flush_cache_page        = (void *)cache_noop;
1979                 flush_cache_range       = (void *)cache_noop;
1980                 flush_icache_all        = (void *)cache_noop;
1981                 flush_data_cache_page   = (void *)cache_noop;
1982                 local_flush_data_cache_page     = (void *)cache_noop;
1983                 break;
1984         }
1985 }
1986
1987 static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
1988                                void *v)
1989 {
1990         switch (cmd) {
1991         case CPU_PM_ENTER_FAILED:
1992         case CPU_PM_EXIT:
1993                 coherency_setup();
1994                 break;
1995         }
1996
1997         return NOTIFY_OK;
1998 }
1999
2000 static struct notifier_block r4k_cache_pm_notifier_block = {
2001         .notifier_call = r4k_cache_pm_notifier,
2002 };
2003
2004 int __init r4k_cache_init_pm(void)
2005 {
2006         return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
2007 }
2008 arch_initcall(r4k_cache_init_pm);