{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct device_node *np = of_cpu_device_node_get(cpu);
+ struct device_node *prev = NULL;
int levels = 0, leaves = 0, level;
if (of_property_read_bool(np, "cache-size"))
++leaves;
if (leaves > 0)
levels = 1;
+ prev = np;
while ((np = of_find_next_cache_node(np))) {
+ of_node_put(prev);
+ prev = np;
if (!of_device_is_compatible(np, "cache"))
break;
if (of_property_read_u32(np, "cache-level", &level))
break;
levels = level;
}
+ of_node_put(np);
this_cpu_ci->num_levels = levels;
this_cpu_ci->num_leaves = leaves;
+
return 0;
}
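
For context (not part of the patch): of_cpu_device_node_get() and of_find_next_cache_node() both return a node with its refcount raised, which is why the hunk above drops the previously held node as soon as the next cache level is obtained, and drops whatever is still held once the walk stops. A minimal sketch of that pattern; count_cache_levels() is a made-up name, not code from the tree:

#include <linux/of.h>
#include <linux/of_device.h>

/* Illustrative only: count how many cache levels sit above a CPU's node. */
static int count_cache_levels(unsigned int cpu)
{
        struct device_node *np = of_cpu_device_node_get(cpu);
        struct device_node *next;
        int levels = 0;

        if (!np)
                return 0;

        while ((next = of_find_next_cache_node(np))) {
                of_node_put(np);        /* done with the node we walked from */
                np = next;
                levels++;
        }
        of_node_put(np);                /* drop the last node still held */

        return levels;
}
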
static int __populate_cache_leaves(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
struct device_node *np = of_cpu_device_node_get(cpu);
+ struct device_node *prev = NULL;
int levels = 1, level = 1;
if (of_property_read_bool(np, "cache-size"))
ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
if (of_property_read_bool(np, "d-cache-size"))
ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
+ prev = np;
while ((np = of_find_next_cache_node(np))) {
+ of_node_put(prev);
+ prev = np;
if (!of_device_is_compatible(np, "cache"))
break;
if (of_property_read_u32(np, "cache-level", &level))
break;
if (of_property_read_bool(np, "d-cache-size"))
ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
levels = level;
}
+ of_node_put(np);
return 0;
}
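
The remaining hunks apply the same ownership rule outside the cacheinfo code: a node handed back by an OF lookup has to be dropped on every path that stops using it, including early error returns. A minimal sketch of that rule; the function name and the pr_info() reporting are made up for illustration:

#include <linux/of.h>
#include <linux/printk.h>

/* Illustrative only: read and report a property of the first "cpu" node. */
static void report_isa_string(void)
{
        struct device_node *node;
        const char *isa;

        node = of_find_node_by_type(NULL, "cpu");
        if (!node)
                return;

        if (of_property_read_string(node, "riscv,isa", &isa)) {
                of_node_put(node);      /* error path: the reference is still owned here */
                return;
        }

        pr_info("riscv,isa = %s\n", isa);
        of_node_put(node);              /* normal path: done with the node */
}
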
if (of_property_read_string(node, "riscv,isa", &isa)) {
pr_warning("Unable to find \"riscv,isa\" devicetree entry");
+ of_node_put(node);
return;
}
+ of_node_put(node);
for (i = 0; i < strlen(isa); ++i)
elf_hwcap |= isa2hwcap[(unsigned char)(isa[i])];
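
The last hunk touches the CPU enumeration loop. of_find_node_by_type() returns every match with its refcount raised, so a reference that is still held when the scan is abandoned early must be dropped by the caller. A small sketch under that assumption; boot_cpu_node_present() is a made-up example, not kernel code:

#include <linux/of.h>
#include <linux/types.h>

/* Illustrative only: stop scanning once a node for the given hart is seen. */
static bool boot_cpu_node_present(u32 boot_hart)
{
        struct device_node *dn = NULL;
        bool found = false;
        u32 hart;

        while ((dn = of_find_node_by_type(dn, "cpu"))) {
                if (!of_property_read_u32(dn, "reg", &hart) &&
                    hart == boot_hart) {
                        found = true;
                        of_node_put(dn);        /* leaving the scan early: drop our reference */
                        break;
                }
        }

        return found;
}
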
while ((dn = of_find_node_by_type(dn, "cpu"))) {
hart = riscv_of_processor_hartid(dn);
- if (hart < 0)
+ if (hart < 0) {
+ of_node_put(dn);
continue;
+ }
if (hart == cpuid_to_hartid_map(0)) {
BUG_ON(found_boot_cpu);
found_boot_cpu = 1;
+ of_node_put(dn);
continue;
}
set_cpu_possible(cpuid, true);
set_cpu_present(cpuid, true);
cpuid++;
+ of_node_put(dn);
}
BUG_ON(!found_boot_cpu);