Linux 6.9-rc1
[linux-2.6-microblaze.git] / kernel / resource.c
index 4c5e80b..fcbca39 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/mount.h>
 #include <linux/resource_ext.h>
 #include <uapi/linux/magic.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
 #include <asm/io.h>
 
 
@@ -56,33 +58,17 @@ struct resource_constraint {
 
 static DEFINE_RWLOCK(resource_lock);
 
-static struct resource *next_resource(struct resource *p)
+/*
+ * Return the next resource after @p in a depth-first walk of the tree:
+ * the first child unless @skip_children is true, otherwise the next
+ * sibling, climbing back up through parents until one is found.
+ * Returns NULL when the walk is exhausted.
+ */
+static struct resource *next_resource(struct resource *p, bool skip_children)
 {
-       if (p->child)
+       if (!skip_children && p->child)
                return p->child;
        while (!p->sibling && p->parent)
                p = p->parent;
        return p->sibling;
 }
 
-static struct resource *next_resource_skip_children(struct resource *p)
-{
-       while (!p->sibling && p->parent)
-               p = p->parent;
-       return p->sibling;
-}
-
+/*
+ * Walk every resource below @_root in depth-first order.  When
+ * @_skip_children is true, the children of each visited resource are
+ * not descended into.  NOTE(review): callers in this file take
+ * resource_lock around the walk.
+ */
 #define for_each_resource(_root, _p, _skip_children) \
-       for ((_p) = (_root)->child; (_p); \
-            (_p) = (_skip_children) ? next_resource_skip_children(_p) : \
-                                      next_resource(_p))
-
-static void *r_next(struct seq_file *m, void *v, loff_t *pos)
-{
-       struct resource *p = v;
-       (*pos)++;
-       return (void *)next_resource(p);
-}
+       for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children))
 
 #ifdef CONFIG_PROC_FS
 
@@ -91,14 +77,28 @@ enum { MAX_IORES_LEVEL = 5 };
 static void *r_start(struct seq_file *m, loff_t *pos)
        __acquires(resource_lock)
 {
-       struct resource *p = pde_data(file_inode(m->file));
-       loff_t l = 0;
+       struct resource *root = pde_data(file_inode(m->file));
+       struct resource *p;
+       loff_t l = *pos;
+
        read_lock(&resource_lock);
-       for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
-               ;
+       /* Advance to the (*pos)'th resource of the depth-first walk. */
+       for_each_resource(root, p, false) {
+               if (l-- == 0)
+                       break;
+       }
+
+       /* p is NULL if *pos is past the last resource; seq_file stops then. */
        return p;
 }
 
+/* seq_file iterator: step to the next resource, descending into children. */
+static void *r_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct resource *p = v;
+
+       (*pos)++;
+
+       return (void *)next_resource(p, false);
+}
+
 static void r_stop(struct seq_file *m, void *v)
        __releases(resource_lock)
 {
@@ -336,7 +336,7 @@ static int find_next_iomem_res(resource_size_t start, resource_size_t end,
 
        read_lock(&resource_lock);
 
-       for (p = iomem_resource.child; p; p = next_resource(p)) {
+       for_each_resource(&iomem_resource, p, false) {
                /* If we passed the resource we are looking for, stop */
                if (p->start > end) {
                        p = NULL;
@@ -431,6 +431,61 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
                                     func);
 }
 
+/*
+ * This function, being a variant of walk_system_ram_res(), calls the @func
+ * callback against all memory ranges of type System RAM which are marked as
+ * IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY in reversed order, i.e., from
+ * higher to lower.
+ *
+ * Returns the last value returned by @func (the walk stops at the first
+ * non-zero return), or -1 if the range list could not be allocated or no
+ * matching range was found.
+ */
+int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
+                               int (*func)(struct resource *, void *))
+{
+       struct resource res, *rams;
+       int rams_size = 16, i;
+       unsigned long flags;
+       int ret = -1;
+
+       /* create a list */
+       rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
+       if (!rams)
+               return ret;
+
+       flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+       i = 0;
+       /* First pass: collect matching ranges in ascending address order. */
+       while ((start < end) &&
+               (!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
+               if (i >= rams_size) {
+                       /* re-alloc: grow the array 16 entries at a time */
+                       struct resource *rams_new;
+
+                       rams_new = kvrealloc(rams, rams_size * sizeof(struct resource),
+                                            (rams_size + 16) * sizeof(struct resource),
+                                            GFP_KERNEL);
+                       if (!rams_new)
+                               goto out;
+
+                       rams = rams_new;
+                       rams_size += 16;
+               }
+
+               rams[i].start = res.start;
+               rams[i++].end = res.end;
+
+               start = res.end + 1;
+       }
+
+       /* go reverse: invoke @func from the highest-addressed range down */
+       for (i--; i >= 0; i--) {
+               ret = (*func)(&rams[i], arg);
+               if (ret)
+                       break;
+       }
+
+out:
+       kvfree(rams);
+       return ret;
+}
+
 /*
  * This function calls the @func callback against all memory ranges, which
  * are ranges marked as IORESOURCE_MEM and IORESOUCE_BUSY.
@@ -888,7 +943,7 @@ void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
                if (conflict->end > new->end)
                        new->end = conflict->end;
 
-               printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
+               pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
        }
        write_unlock(&resource_lock);
 }
@@ -1283,9 +1338,7 @@ void __release_region(struct resource *parent, resource_size_t start,
 
        write_unlock(&resource_lock);
 
-       printk(KERN_WARNING "Trying to free nonexistent resource "
-               "<%016llx-%016llx>\n", (unsigned long long)start,
-               (unsigned long long)end);
+       pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
 }
 EXPORT_SYMBOL(__release_region);
 
@@ -1345,20 +1398,6 @@ retry:
                        continue;
                }
 
-               /*
-                * All memory regions added from memory-hotplug path have the
-                * flag IORESOURCE_SYSTEM_RAM. If the resource does not have
-                * this flag, we know that we are dealing with a resource coming
-                * from HMM/devm. HMM/devm use another mechanism to add/release
-                * a resource. This goes via devm_request_mem_region and
-                * devm_release_mem_region.
-                * HMM/devm take care to release their resources when they want,
-                * so if we are dealing with them, let us just back off here.
-                */
-               if (!(res->flags & IORESOURCE_SYSRAM)) {
-                       break;
-               }
-
                if (!(res->flags & IORESOURCE_MEM))
                        break;
 
@@ -1657,22 +1696,22 @@ __setup("reserve=", reserve_setup);
  */
 int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
 {
-       struct resource *p = &iomem_resource;
+       resource_size_t end = addr + size - 1;
+       struct resource *p;
        int err = 0;
-       loff_t l;
 
        read_lock(&resource_lock);
-       for (p = p->child; p ; p = r_next(NULL, p, &l)) {
+       for_each_resource(&iomem_resource, p, false) {
                /*
                 * We can probably skip the resources without
                 * IORESOURCE_IO attribute?
                 */
-               if (p->start >= addr + size)
+               if (p->start > end)
                        continue;
                if (p->end < addr)
                        continue;
                if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
-                   PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
+                   PFN_DOWN(p->end) >= PFN_DOWN(end))
                        continue;
                /*
                 * if a resource is "BUSY", it's not a hardware resource
@@ -1683,10 +1722,8 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
                if (p->flags & IORESOURCE_BUSY)
                        continue;
 
-               printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n",
-                      (unsigned long long)addr,
-                      (unsigned long long)(addr + size - 1),
-                      p->name, p);
+               pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
+                       &addr, &end, p->name, p);
                err = -1;
                break;
        }
@@ -1707,18 +1744,15 @@ static int strict_iomem_checks;
  *
  * Returns true if exclusive to the kernel, otherwise returns false.
  */
-bool iomem_is_exclusive(u64 addr)
+bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
 {
        const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
                                                  IORESOURCE_EXCLUSIVE;
        bool skip_children = false, err = false;
-       int size = PAGE_SIZE;
        struct resource *p;
 
-       addr = addr & PAGE_MASK;
-
        read_lock(&resource_lock);
-       for_each_resource(&iomem_resource, p, skip_children) {
+       for_each_resource(root, p, skip_children) {
                if (p->start >= addr + size)
                        break;
                if (p->end < addr) {
@@ -1757,6 +1791,12 @@ bool iomem_is_exclusive(u64 addr)
        return err;
 }
 
+/*
+ * Check whether the PAGE_SIZE page containing @addr is exclusive to the
+ * kernel, via resource_is_exclusive() on the iomem tree.
+ */
+bool iomem_is_exclusive(u64 addr)
+{
+       return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
+                                    PAGE_SIZE);
+}
+
 struct resource_entry *resource_list_create_entry(struct resource *res,
                                                  size_t extra_size)
 {
@@ -1861,8 +1901,8 @@ get_free_mem_region(struct device *dev, struct resource *base,
 
        write_lock(&resource_lock);
        for (addr = gfr_start(base, size, align, flags);
-            gfr_continue(base, addr, size, flags);
-            addr = gfr_next(addr, size, flags)) {
+            gfr_continue(base, addr, align, flags);
+            addr = gfr_next(addr, align, flags)) {
                if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
                    REGION_DISJOINT)
                        continue;