// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <linux/of.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>
static void ipi_remote_fence_i(void *info)
{
	return local_flush_icache_all();
}
void flush_icache_all(void)
{
	local_flush_icache_all();

	if (IS_ENABLED(CONFIG_RISCV_SBI))
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);
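
/*
 * For reference: local_flush_icache_all() is not defined in this file; it
 * is a static inline in asm/cacheflush.h wrapping a bare FENCE.I, which
 * orders this hart's instruction fetches after prior stores. A minimal
 * sketch of that helper (illustration only, kept out of the build):
 */
#if 0
static inline void local_flush_icache_all(void)
{
	asm volatile ("fence.i" ::: "memory");
}
#endif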
/*
 * Performs an icache flush for the given MM context. RISC-V has no direct
 * mechanism for instruction cache shootdowns, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them
	 * as flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart. Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
		sbi_remote_fence_i(&others);
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}
#endif /* CONFIG_SMP */
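
/*
 * The deferred half of the scheme described above lives on the
 * context-switch path (arch/riscv/mm/context.c): before execution resumes
 * in an MM on a given hart, the hart checks icache_stale_mask and performs
 * the local flush it skipped earlier. A condensed sketch of that consumer,
 * mirroring the upstream implementation (illustration only, kept out of
 * the build):
 */
#if 0
static inline void flush_icache_deferred(struct mm_struct *mm,
					 unsigned int cpu)
{
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with the barrier in flush_icache_mm().
		 */
		smp_mb();
		local_flush_icache_all();
	}
}
#endif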
#ifdef CONFIG_MMU
void flush_icache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);

	/*
	 * HugeTLB pages are always fully mapped, so setting only the head
	 * page's PG_dcache_clean flag is enough.
	 */
	if (PageHuge(page))
		page = compound_head(page);

	if (!test_bit(PG_dcache_clean, &page->flags)) {
		flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
#endif /* CONFIG_MMU */
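
/*
 * flush_icache_pte() is reached from the page-table helpers when a
 * present, executable PTE is installed, so repeated faults on a page whose
 * PG_dcache_clean flag is already set skip the global flush. Roughly the
 * call site in arch/riscv/include/asm/pgtable.h (illustration only, kept
 * out of the build):
 */
#if 0
static void __set_pte_at(struct mm_struct *mm,
			 unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}
#endif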
unsigned int riscv_cbom_block_size;
EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
void riscv_init_cbom_blocksize(void)
{
	struct device_node *node;
	unsigned long cbom_hartid;
	u32 val, probed_block_size;
	int ret;

	probed_block_size = 0;
	for_each_of_cpu_node(node) {
		unsigned long hartid;

		ret = riscv_of_processor_hartid(node, &hartid);
		if (ret)
			continue;

		/* Set the block size for the Zicbom extension if present. */
		ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
		if (ret)
			continue;

		if (!probed_block_size) {
			probed_block_size = val;
			cbom_hartid = hartid;
		} else {
			if (probed_block_size != val)
				pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
					cbom_hartid, hartid);
		}
	}

	if (probed_block_size)
		riscv_cbom_block_size = probed_block_size;
}
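
/*
 * riscv_cbom_block_size is consumed by the noncoherent-DMA code, which
 * walks an address range in block-size strides and issues one Zicbom
 * cache-block operation per block (upstream this is wrapped in the
 * ALT_CMO_OP() alternatives macro in arch/riscv/mm/dma-noncoherent.c).
 * A hypothetical sketch of that striding pattern; the helper name and the
 * plain "cbo.clean" mnemonic (which needs a Zicbom-aware assembler) are
 * ours, not the kernel's (illustration only, kept out of the build):
 */
#if 0
static void cbo_clean_range_example(void *vaddr, size_t size)
{
	unsigned long addr = ALIGN_DOWN((unsigned long)vaddr,
					riscv_cbom_block_size);
	unsigned long end = (unsigned long)vaddr + size;

	/* One cache-block clean per riscv_cbom_block_size bytes. */
	for (; addr < end; addr += riscv_cbom_block_size)
		asm volatile ("cbo.clean (%0)" : : "r" (addr) : "memory");
}
#endif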