/*
 * PARISC TLB and cache flushing support
 * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
 * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
 * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * NOTE: fdc, fic, and pdc instructions that use base register modification
 *       should only use index and base registers that are not shadowed,
 *       so that the fast path emulation in the non-access miss handler
 *       can handle those cases.
 */
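/* (For reference: the PA-RISC shadow registers are %r1, %r8, %r9,
 * %r16, %r17, %r24 and %r25, which is why the base-modifying
 * fdc/fic/pdc loops in this file keep to registers such as %r23,
 * %r26, %r28 and %r31.)
 */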
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>

#include <linux/linkage.h>
#include <linux/init.h>
ENTRY_CFI(flush_tlb_all_local)

	/*
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire tlb. Also, there needs to be no intervening
	 * tlb operations, e.g. tlb misses, so the operation needs
	 * to happen in real mode with all interruptions disabled.
	 */
	/* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
	rsm		PSW_SM_I, %r19		/* save I-bit state */

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
1:	load32		PA(cache_info), %r1

	/* Flush Instruction Tlb */

	LDREG		ITLB_SID_BASE(%r1), %r20
	LDREG		ITLB_SID_STRIDE(%r1), %r21
	LDREG		ITLB_SID_COUNT(%r1), %r22
	LDREG		ITLB_OFF_BASE(%r1), %arg0
	LDREG		ITLB_OFF_STRIDE(%r1), %arg1
	LDREG		ITLB_OFF_COUNT(%r1), %arg2
	LDREG		ITLB_LOOP(%r1), %arg3
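	/* The PDC-supplied cache_info geometry drives three nested
	 * loops.  Roughly, in C (a sketch based on the field names, not
	 * literal kernel code):
	 *
	 *	for (sid = sid_base; sid_count--; sid += sid_stride) {
	 *		mtsp(sid, sr1);
	 *		for (addr = off_base, i = off_count; i--; addr += off_stride)
	 *			for (j = loop; j--; )
	 *				pitlbe(sr1, addr);
	 *	}
	 */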
	addib,COND(=)	-1, %arg3, fitoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fitdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */

fitmanyloop:					/* Loop if LOOP >= 2 */
	mtsp		%r20, %sr1
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */

fitmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
	pitlbe		%r0(%sr1, %r28)
	pitlbe,m	%arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
	addib,COND(>)	-1, %r29, fitmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fitmanyloop /* Re-init base addr */
	addib,COND(<=),n -1, %r22, fitdone	/* Outer loop count decr */

fitoneloop:					/* Loop if LOOP = 1 */
	mtsp		%r20, %sr1
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */

fitonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)	-1, %r29, fitonemiddle	/* Middle loop count decr */
	pitlbe,m	%arg1(%sr1, %r28)	/* pitlbe for one loop */

	addib,COND(>)	-1, %r22, fitoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */
fitdone:

	/* Flush Data Tlb */

	LDREG		DTLB_SID_BASE(%r1), %r20
	LDREG		DTLB_SID_STRIDE(%r1), %r21
	LDREG		DTLB_SID_COUNT(%r1), %r22
	LDREG		DTLB_OFF_BASE(%r1), %arg0
	LDREG		DTLB_OFF_STRIDE(%r1), %arg1
	LDREG		DTLB_OFF_COUNT(%r1), %arg2
	LDREG		DTLB_LOOP(%r1), %arg3

	addib,COND(=)	-1, %arg3, fdtoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdtdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */

fdtmanyloop:					/* Loop if LOOP >= 2 */
	mtsp		%r20, %sr1
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */

fdtmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
	pdtlbe		%r0(%sr1, %r28)
	pdtlbe,m	%arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
	addib,COND(>)	-1, %r29, fdtmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fdtmanyloop /* Re-init base addr */
	addib,COND(<=),n -1, %r22, fdtdone	/* Outer loop count decr */

fdtoneloop:					/* Loop if LOOP = 1 */
	mtsp		%r20, %sr1
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */

fdtonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)	-1, %r29, fdtonemiddle	/* Middle loop count decr */
	pdtlbe,m	%arg1(%sr1, %r28)	/* pdtlbe for one loop */

	addib,COND(>)	-1, %r22, fdtoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */

fdtdone:
	/*
	 * Switch back to virtual mode
	 */

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
	or		%r1, %r19, %r1		/* I-bit to state on entry */
	mtctl		%r1, %ipsw		/* restore I-bit (entire PSW) */

ENDPROC_CFI(flush_tlb_all_local)
	.import cache_info, data

ENTRY_CFI(flush_instruction_cache_local)

	load32		cache_info, %r1

	/* Flush Instruction Cache */

	LDREG		ICACHE_BASE(%r1), %arg0
	LDREG		ICACHE_STRIDE(%r1), %arg1
	LDREG		ICACHE_COUNT(%r1), %arg2
	LDREG		ICACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop */

	addib,COND(=)	-1, %arg3, fioneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fisync	/* If loop < 0, do sync */
fimanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fimanyloop	/* Adjusted inner loop decr */
	fice		%r0(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)	/* Last fice and addr adjust */
	movb,tr		%arg3, %r31, fimanyloop	/* Re-init inner loop count */
	addib,COND(<=),n -1, %arg2, fisync	/* Outer loop decr */

fioneloop:					/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fice instruction */
	cmpib,COND(>>=),n 15, %arg2, fioneloop2

fioneloop1:
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	addib,COND(>)	-16, %arg2, fioneloop1
	fice,m		%arg1(%sr1, %arg0)
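	/* Each pass above retires 16 lines: 15 explicit fice,m plus one
	 * more in the addib delay slot, matching the -16 decrement of
	 * %arg2.  The fioneloop2 tail below mops up any remainder one
	 * line at a time.
	 */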
fioneloop2:
	cmpb,COND(=),n	%arg2, %r0, fisync	/* Predict branch taken */

	addib,COND(>)	-1, %arg2, fioneloop2	/* Outer loop count decr */
	fice,m		%arg1(%sr1, %arg0)	/* Fice for one loop */

fisync:
	sync
	mtsm		%r22			/* restore I-bit */

ENDPROC_CFI(flush_instruction_cache_local)
	.import cache_info, data

ENTRY_CFI(flush_data_cache_local)

	load32		cache_info, %r1

	/* Flush Data Cache */

	LDREG		DCACHE_BASE(%r1), %arg0
	LDREG		DCACHE_STRIDE(%r1), %arg1
	LDREG		DCACHE_COUNT(%r1), %arg2
	LDREG		DCACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop */

	addib,COND(=)	-1, %arg3, fdoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdsync	/* If loop < 0, do sync */
fdmanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fdmanyloop	/* Adjusted inner loop decr */
	fdce		%r0(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)	/* Last fdce and addr adjust */
	movb,tr		%arg3, %r31, fdmanyloop	/* Re-init inner loop count */
	addib,COND(<=),n -1, %arg2, fdsync	/* Outer loop decr */

fdoneloop:					/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fdce instruction */
	cmpib,COND(>>=),n 15, %arg2, fdoneloop2

fdoneloop1:
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	addib,COND(>)	-16, %arg2, fdoneloop1
	fdce,m		%arg1(%sr1, %arg0)

fdoneloop2:
	cmpb,COND(=),n	%arg2, %r0, fdsync	/* Predict branch taken */

	addib,COND(>)	-1, %arg2, fdoneloop2	/* Outer loop count decr */
	fdce,m		%arg1(%sr1, %arg0)	/* Fdce for one loop */

fdsync:
	syncdma
	sync
	mtsm		%r22			/* restore I-bit */

ENDPROC_CFI(flush_data_cache_local)
	/* Macros to serialize TLB purge operations on SMP. */

	.macro	tlb_lock	la,flags,tmp
#ifdef CONFIG_SMP
#if __PA_LDCW_ALIGNMENT > 4
	load32		pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
	depi		0,31,__PA_LDCW_ALIGN_ORDER, \la
#else
	load32		pa_tlb_lock, \la
#endif
	rsm		PSW_SM_I,\flags
1:	LDCW		0(\la),\tmp
	cmpib,<>,n	0,\tmp,3f
2:	ldw		0(\la),\tmp
	cmpib,<>	0,\tmp,1b
	nop
	b,n		2b
3:
#endif
	.endm

	.macro	tlb_unlock	la,flags,tmp
#ifdef CONFIG_SMP
	ldi		1,\tmp
	stw		\tmp,0(\la)
	mtsm		\flags
#endif
	.endm
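	/* ldcw (load-and-clear word), PA-RISC's only atomic primitive,
	 * reads a word and zeroes it in one step, so a non-zero lock
	 * word means "free": tlb_lock owns the lock when ldcw returns
	 * non-zero, otherwise it spins with plain ldw loads before
	 * retrying, and tlb_unlock releases by storing 1 back.  The
	 * alignment fixup above exists because ldcw requires a 16-byte
	 * aligned operand.
	 */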
	/* Clear page using kernel mapping. */

ENTRY_CFI(clear_page_asm)
#ifdef CONFIG_64BIT

	/* Unroll the loop. */
	ldi		(PAGE_SIZE / 128), %r1

	/* Note reverse branch hint for addib is taken. */
	addib,COND(>),n	-1, %r1, 1b

#else

	/*
	 * Note that until (if) we start saving the full 64-bit register
	 * values on interrupt, we can't use std on a 32 bit kernel.
	 */
	ldi		(PAGE_SIZE / 64), %r1
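	/* 64-bit kernels clear 128 bytes per iteration with 8-byte std
	 * stores, 32-bit kernels 64 bytes per iteration with 4-byte stw
	 * stores, hence the two different trip counts (loop bodies not
	 * shown here).
	 */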
	addib,COND(>),n	-1, %r1, 1b

#endif
ENDPROC_CFI(clear_page_asm)

	/* Copy page using kernel mapping. */

ENTRY_CFI(copy_page_asm)
#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * Prefetch doesn't improve performance on rp3440.
	 * GCC probably can do this just as well...
	 */

	ldi		(PAGE_SIZE / 128), %r1

	/* Note reverse branch hint for addib is taken. */
	addib,COND(>),n	-1, %r1, 1b

#else

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling).
	 * Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */
	ldi		(PAGE_SIZE / 64), %r1

	addib,COND(>),n	-1, %r1, 1b

#endif
ENDPROC_CFI(copy_page_asm)
/*
 * NOTE: Code in clear_user_page has a hard coded dependency on the
 *	 maximum alias boundary being 4 MB. We've been assured by the
 *	 parisc chip designers that there will not ever be a parisc
 *	 chip with a larger alias boundary (Never say never :-) ).
 *
 *	 Subtle: the dtlb miss handlers support the temp alias region by
 *	 "knowing" that if a dtlb miss happens within the temp alias
 *	 region it must have occurred while in clear_user_page. Since
 *	 this routine makes use of processor local translations, we
 *	 don't want to insert them into the kernel page table. Instead,
 *	 we load up some general registers (they need to be registers
 *	 which aren't shadowed) with the physical page numbers (preshifted
 *	 for tlb insertion) needed to insert the translations. When we
 *	 miss on the translation, the dtlb miss handler inserts the
 *	 translation into the tlb using these values:
 *
 *	 %r26 physical page (shifted for tlb insert) of "to" translation
 *	 %r23 physical page (shifted for tlb insert) of "from" translation
 */
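/* A sketch of the miss-handler side of this contract (an assumption
 * drawn from the description above, not the handler's actual code):
 *
 *	if (addr_in_tmpalias_region(va)) {
 *		pfn = (va & FROM_ALIAS_BIT) ? regs->r23 : regs->r26;
 *		idtlbt(pfn, tmpalias_protection);   // TLB only, no page table
 *	}
 *
 * FROM_ALIAS_BIT is a hypothetical name for the 4 MB address bit that
 * separates the 'to' and 'from' halves of the alias window (see the
 * depdi/depwi below).
 */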
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
#define PAGE_ADD_SHIFT	(PAGE_SHIFT-12)

	.macro	convert_phys_for_tlb_insert20	phys
	extrd,u		\phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
#if _PAGE_SIZE_ENCODING_DEFAULT
	depdi		_PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
#endif
	.endm
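	/* Qualitatively: the extrd,u right-justifies the page-aligned
	 * physical address into the PPN field that iitlbt/idtlbt expect,
	 * and the depdi stamps the default page-size encoding into the
	 * low bits; the exact bit positions follow the PA 2.0 TLB-insert
	 * format.
	 */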
/*
 * copy_user_page_asm() performs a page copy using mappings
 * equivalent to the user page mappings. It can be used to
 * implement copy_user_page() but unfortunately both the `from'
 * and `to' pages need to be flushed through mappings equivalent
 * to the user mappings after the copy because the kernel accesses
 * the `from' page through the kmap kernel mapping and the `to'
 * page needs to be flushed since code can be copied. As a
 * result, this implementation is less efficient than the simpler
 * copy using the kernel mapping. It only needs the `from' page
 * to be flushed via the user mapping. The kunmap routines handle
 * the flushes needed for the kernel mapping.
 *
 * I'm still keeping this around because it may be possible to
 * use it if more information is passed into copy_user_page().
 * Have to do some measurements to see if it is worthwhile to
 * lobby for such a change.
 */
ENTRY_CFI(copy_user_page_asm)

	/* Convert virtual `to' and `from' addresses to physical addresses.
	   Move `from' physical address to non-shadowed register. */
	ldil		L%(__PAGE_OFFSET), %r1
	sub		%r26, %r1, %r26
	sub		%r25, %r1, %r23

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	convert_phys_for_tlb_insert20 %r23	/* convert phys addr to tlb insert format */
	depd		%r24, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
	copy		%r28, %r29
	depdi		1, 41,1, %r29		/* Form aliased virtual address 'from' */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	extrw,u		%r23, 24,25, %r23	/* convert phys addr to tlb insert format */
	depw		%r24, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
	copy		%r28, %r29
	depwi		1, 9,1, %r29		/* Form aliased virtual address 'from' */
#endif
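	/* The 'from' alias is the 'to' alias with one extra address bit
	 * set (worth 4 MB: position 41 in the 64-bit deposit, 9 in the
	 * 32-bit one), so both mappings share the tmpalias window
	 * without colliding; this is the 4 MB alias boundary noted
	 * above.
	 */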
	/* Purge any old translations */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
	pdtlb,l		%r0(%r29)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		%r0(%r28)
	pdtlb		%r0(%r29)
	tlb_unlock	%r20,%r21,%r22
#endif

#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * GCC probably can do this just as well.
	 */

	ldd		0(%r29), %r19
	ldi		(PAGE_SIZE / 128), %r1

	/* conditional branches nullify on forward taken branch, and on
	 * non-taken backward branch. Note that .+4 is a backwards branch.
	 * The ldd should only get executed if the branch is taken.
	 */
	addib,COND(>),n	-1, %r1, 1b		/* bundle 10 */
	ldd		0(%r29), %r19		/* start next loads */

#else
	ldi		(PAGE_SIZE / 64), %r1

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling). It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions. Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */

	addib,COND(>)	-1, %r1, 1b

#endif

ENDPROC_CFI(copy_user_page_asm)
ENTRY_CFI(clear_user_page_asm)

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		%r0(%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

#ifdef CONFIG_64BIT
	ldi		(PAGE_SIZE / 128), %r1

	/* PREFETCH (Write) has not (yet) been proven to help here */
	/* #define PREFETCHW_OP	ldd 256(%0), %r0 */

	addib,COND(>)	-1, %r1, 1b

#else	/* ! CONFIG_64BIT */
	ldi		(PAGE_SIZE / 64), %r1

	addib,COND(>)	-1, %r1, 1b

#endif	/* CONFIG_64BIT */

ENDPROC_CFI(clear_user_page_asm)
ENTRY_CFI(flush_dcache_page_asm)

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		%r0(%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r31

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r28, %r25, %r25
	sub		%r25, %r31, %r25
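	/* %r25 is now the loop bound: alias base + PAGE_SIZE, biased
	 * down one stride because the final fdc,m executes in the cmpb
	 * delay slot.  The flush below is, in effect:
	 *
	 *	for (va = alias; va < alias + PAGE_SIZE; va += dcache_stride)
	 *		fdc(va);
	 */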
1:	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)

	cmpb,COND(<<)	%r28, %r25, 1b
	fdc,m		%r31(%r28)

ENDPROC_CFI(flush_dcache_page_asm)
ENTRY_CFI(flush_icache_page_asm)

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation. Note that the FIC instruction
	 * may use either the instruction or data TLB. Given that we
	 * have a flat address space, it's not clear which TLB will be
	 * used. So, we purge both entries. */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
	pitlb,l		%r0(%sr4,%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		%r0(%r28)
	pitlb		%r0(%sr4,%r28)
	tlb_unlock	%r20,%r21,%r22
#endif
	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r31

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r28, %r25, %r25
	sub		%r25, %r31, %r25

	/* fic only has the type 26 form on PA1.1, requiring an
	 * explicit space specification, so use %sr4 */
1:	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	cmpb,COND(<<)	%r28, %r25, 1b
	fic,m		%r31(%sr4,%r28)

ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm)

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25

1:	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)

	cmpb,COND(<<)	%r26, %r25, 1b
	fdc,m		%r23(%r26)

ENDPROC_CFI(flush_kernel_dcache_page_asm)
ENTRY_CFI(purge_kernel_dcache_page_asm)

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25
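	/* Same bias as the flush loops above: end = page + PAGE_SIZE -
	 * stride, so the pdc,m in the cmpb delay slot covers the last
	 * cache line on the terminating pass.
	 */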
1:	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)

	cmpb,COND(<<)	%r26, %r25, 1b
	pdc,m		%r23(%r26)

ENDPROC_CFI(purge_kernel_dcache_page_asm)
ENTRY_CFI(flush_user_dcache_range_asm)

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fdc,m		%r23(%sr3, %r26)
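	/* The two-instruction loop above leans on PA-RISC nullification:
	 * with ",n" on a backward branch the delay-slot fdc,m executes
	 * when the branch is taken and is nullified on the final
	 * fall-through, so nothing past %r25 is flushed.  In C terms:
	 *
	 *	for (va = start & ~(stride - 1); va < end; va += stride)
	 *		fdc(sr3, va);	// %sr3: user address space
	 */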
ENDPROC_CFI(flush_user_dcache_range_asm)

ENTRY_CFI(flush_kernel_dcache_range_asm)

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fdc,m		%r23(%r26)

ENDPROC_CFI(flush_kernel_dcache_range_asm)

ENTRY_CFI(purge_kernel_dcache_range_asm)

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	pdc,m		%r23(%r26)

ENDPROC_CFI(purge_kernel_dcache_range_asm)

ENTRY_CFI(flush_user_icache_range_asm)

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fic,m		%r23(%sr3, %r26)

ENDPROC_CFI(flush_user_icache_range_asm)
ENTRY_CFI(flush_kernel_icache_page)

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25
1:	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	cmpb,COND(<<)	%r26, %r25, 1b
	fic,m		%r23(%sr4, %r26)

ENDPROC_CFI(flush_kernel_icache_page)
ENTRY_CFI(flush_kernel_icache_range_asm)

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fic,m		%r23(%sr4, %r26)

ENDPROC_CFI(flush_kernel_icache_range_asm)
	/* align should cover use of rfi in disable_sr_hashing_asm and
	 * srdis_done.
	 */
	.align	256

ENTRY_CFI(disable_sr_hashing_asm)

	/*
	 * Switch to real mode
	 */

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1

1:	cmpib,=,n	SRHASH_PCXST, %r26, srdis_pcxs
	cmpib,=,n	SRHASH_PCXL, %r26, srdis_pcxl
	cmpib,=,n	SRHASH_PA20, %r26, srdis_pa20
	b,n		srdis_done

srdis_pcxs:
	/* Disable Space Register Hashing for PCXS, PCXT, PCXT' */

	.word		0x141c1a00		/* mfdiag %dr0, %r28 */
	.word		0x141c1a00		/* must issue twice */
	depwi		0,18,1, %r28		/* Clear DHE (dcache hash enable) */
	depwi		0,20,1, %r28		/* Clear IHE (icache hash enable) */
	.word		0x141c1600		/* mtdiag %r28, %dr0 */
	.word		0x141c1600		/* must issue twice */
	b,n		srdis_done

srdis_pcxl:
	/* Disable Space Register Hashing for PCXL */

	.word		0x141c0600		/* mfdiag %dr0, %r28 */
	depwi		0,28,2, %r28		/* Clear DHASH_EN & IHASH_EN */
	.word		0x141c0240		/* mtdiag %r28, %dr0 */
	b,n		srdis_done

srdis_pa20:

	/* Disable Space Register Hashing for PCXU, PCXU+, PCXW, PCXW+, PCXW2 */

	.word		0x144008bc		/* mfdiag %dr2, %r28 */
	depdi		0, 54,1, %r28		/* clear DIAG_SPHASH_ENAB (bit 54) */
	.word		0x145c1840		/* mtdiag %r28, %dr2 */
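	/* The .word values above are hand-encoded diagnose instructions
	 * (mfdiag/mtdiag); the assembler provides no mnemonics for
	 * them, and the diagnose-register bit layout differs per CPU
	 * family, which is why each variant needs its own stub.
	 */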
srdis_done:
	/* Switch back to virtual mode */
	rsm		PSW_SM_I, %r0		/* prep to load iia queue */

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1

ENDPROC_CFI(disable_sr_hashing_asm)