1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/memremap.h>
#include <linux/rculist.h>
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/mm.h>
#include "nfit_test.h"
17 static LIST_HEAD(iomap_head);
19 static struct iomap_ops {
20 nfit_test_lookup_fn nfit_test_lookup;
21 nfit_test_evaluate_dsm_fn evaluate_dsm;
22 struct list_head list;
24 .list = LIST_HEAD_INIT(iomap_ops.list),
27 void nfit_test_setup(nfit_test_lookup_fn lookup,
28 nfit_test_evaluate_dsm_fn evaluate)
30 iomap_ops.nfit_test_lookup = lookup;
31 iomap_ops.evaluate_dsm = evaluate;
32 list_add_rcu(&iomap_ops.list, &iomap_head);
34 EXPORT_SYMBOL(nfit_test_setup);
36 void nfit_test_teardown(void)
38 list_del_rcu(&iomap_ops.list);
41 EXPORT_SYMBOL(nfit_test_teardown);
43 static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
45 struct iomap_ops *ops;
47 ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
49 return ops->nfit_test_lookup(resource);
53 struct nfit_test_resource *get_nfit_res(resource_size_t resource)
55 struct nfit_test_resource *res;
58 res = __get_nfit_res(resource);
63 EXPORT_SYMBOL(get_nfit_res);
65 void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
66 void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
68 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
71 return (void __iomem *) nfit_res->buf + offset
72 - nfit_res->res.start;
73 return fallback_fn(offset, size);
76 void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
77 resource_size_t offset, unsigned long size)
79 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
82 return (void __iomem *) nfit_res->buf + offset
83 - nfit_res->res.start;
84 return devm_ioremap_nocache(dev, offset, size);
86 EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
88 void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
89 size_t size, unsigned long flags)
91 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
94 return nfit_res->buf + offset - nfit_res->res.start;
95 return devm_memremap(dev, offset, size, flags);
97 EXPORT_SYMBOL(__wrap_devm_memremap);
99 static void nfit_test_kill(void *_pgmap)
101 struct dev_pagemap *pgmap = _pgmap;
103 WARN_ON(!pgmap || !pgmap->ref);
105 if (pgmap->ops && pgmap->ops->kill)
106 pgmap->ops->kill(pgmap);
108 percpu_ref_kill(pgmap->ref);
110 if (pgmap->ops && pgmap->ops->cleanup) {
111 pgmap->ops->cleanup(pgmap);
113 wait_for_completion(&pgmap->done);
114 percpu_ref_exit(pgmap->ref);
118 static void dev_pagemap_percpu_release(struct percpu_ref *ref)
120 struct dev_pagemap *pgmap =
121 container_of(ref, struct dev_pagemap, internal_ref);
123 complete(&pgmap->done);
126 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
129 resource_size_t offset = pgmap->res.start;
130 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
133 return devm_memremap_pages(dev, pgmap);
137 if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
138 return ERR_PTR(-EINVAL);
140 init_completion(&pgmap->done);
141 error = percpu_ref_init(&pgmap->internal_ref,
142 dev_pagemap_percpu_release, 0, GFP_KERNEL);
144 return ERR_PTR(error);
145 pgmap->ref = &pgmap->internal_ref;
147 if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
148 WARN(1, "Missing reference count teardown definition\n");
149 return ERR_PTR(-EINVAL);
153 error = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
155 return ERR_PTR(error);
156 return nfit_res->buf + offset - nfit_res->res.start;
158 EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
160 pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
162 struct nfit_test_resource *nfit_res = get_nfit_res(addr);
166 return phys_to_pfn_t(addr, flags);
168 EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
170 void *__wrap_memremap(resource_size_t offset, size_t size,
173 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
176 return nfit_res->buf + offset - nfit_res->res.start;
177 return memremap(offset, size, flags);
179 EXPORT_SYMBOL(__wrap_memremap);
/* devm_memunmap() interposer: test-resource buffers need no unmapping. */
void __wrap_devm_memunmap(struct device *dev, void *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;
	return devm_memunmap(dev, addr);
}
EXPORT_SYMBOL(__wrap_devm_memunmap);
191 void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
193 return __nfit_test_ioremap(offset, size, ioremap_nocache);
195 EXPORT_SYMBOL(__wrap_ioremap_nocache);
197 void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
199 return __nfit_test_ioremap(offset, size, ioremap_wc);
201 EXPORT_SYMBOL(__wrap_ioremap_wc);
203 void __wrap_iounmap(volatile void __iomem *addr)
205 struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
208 return iounmap(addr);
210 EXPORT_SYMBOL(__wrap_iounmap);
/* memunmap() interposer: test-resource buffers need no unmapping. */
void __wrap_memunmap(void *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;
	return memunmap(addr);
}
EXPORT_SYMBOL(__wrap_memunmap);
222 static bool nfit_test_release_region(struct device *dev,
223 struct resource *parent, resource_size_t start,
226 static void nfit_devres_release(struct device *dev, void *data)
228 struct resource *res = *((struct resource **) data);
230 WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
231 resource_size(res)));
234 static int match(struct device *dev, void *__res, void *match_data)
236 struct resource *res = *((struct resource **) __res);
237 resource_size_t start = *((resource_size_t *) match_data);
239 return res->start == start;
242 static bool nfit_test_release_region(struct device *dev,
243 struct resource *parent, resource_size_t start,
246 if (parent == &iomem_resource) {
247 struct nfit_test_resource *nfit_res = get_nfit_res(start);
250 struct nfit_test_request *req;
251 struct resource *res = NULL;
254 devres_release(dev, nfit_devres_release, match,
259 spin_lock(&nfit_res->lock);
260 list_for_each_entry(req, &nfit_res->requests, list)
261 if (req->res.start == start) {
263 list_del(&req->list);
266 spin_unlock(&nfit_res->lock);
268 WARN(!res || resource_size(res) != n,
269 "%s: start: %llx n: %llx mismatch: %pr\n",
270 __func__, start, n, res);
279 static struct resource *nfit_test_request_region(struct device *dev,
280 struct resource *parent, resource_size_t start,
281 resource_size_t n, const char *name, int flags)
283 struct nfit_test_resource *nfit_res;
285 if (parent == &iomem_resource) {
286 nfit_res = get_nfit_res(start);
288 struct nfit_test_request *req;
289 struct resource *res = NULL;
291 if (start + n > nfit_res->res.start
292 + resource_size(&nfit_res->res)) {
293 pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
299 spin_lock(&nfit_res->lock);
300 list_for_each_entry(req, &nfit_res->requests, list)
301 if (start == req->res.start) {
305 spin_unlock(&nfit_res->lock);
308 WARN(1, "%pr already busy\n", res);
312 req = kzalloc(sizeof(*req), GFP_KERNEL);
315 INIT_LIST_HEAD(&req->list);
319 res->end = start + n - 1;
321 res->flags = resource_type(parent);
322 res->flags |= IORESOURCE_BUSY | flags;
323 spin_lock(&nfit_res->lock);
324 list_add(&req->list, &nfit_res->requests);
325 spin_unlock(&nfit_res->lock);
330 d = devres_alloc(nfit_devres_release,
331 sizeof(struct resource *),
339 pr_debug("%s: %pr\n", __func__, res);
344 return __devm_request_region(dev, parent, start, n, name);
345 return __request_region(parent, start, n, name, flags);
348 struct resource *__wrap___request_region(struct resource *parent,
349 resource_size_t start, resource_size_t n, const char *name,
352 return nfit_test_request_region(NULL, parent, start, n, name, flags);
354 EXPORT_SYMBOL(__wrap___request_region);
356 int __wrap_insert_resource(struct resource *parent, struct resource *res)
358 if (get_nfit_res(res->start))
360 return insert_resource(parent, res);
362 EXPORT_SYMBOL(__wrap_insert_resource);
364 int __wrap_remove_resource(struct resource *res)
366 if (get_nfit_res(res->start))
368 return remove_resource(res);
370 EXPORT_SYMBOL(__wrap_remove_resource);
372 struct resource *__wrap___devm_request_region(struct device *dev,
373 struct resource *parent, resource_size_t start,
374 resource_size_t n, const char *name)
378 return nfit_test_request_region(dev, parent, start, n, name, 0);
380 EXPORT_SYMBOL(__wrap___devm_request_region);
382 void __wrap___release_region(struct resource *parent, resource_size_t start,
385 if (!nfit_test_release_region(NULL, parent, start, n))
386 __release_region(parent, start, n);
388 EXPORT_SYMBOL(__wrap___release_region);
390 void __wrap___devm_release_region(struct device *dev, struct resource *parent,
391 resource_size_t start, resource_size_t n)
393 if (!nfit_test_release_region(dev, parent, start, n))
394 __devm_release_region(dev, parent, start, n);
396 EXPORT_SYMBOL(__wrap___devm_release_region);
398 acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
399 struct acpi_object_list *p, struct acpi_buffer *buf)
401 struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
402 union acpi_object **obj;
404 if (!nfit_res || strcmp(path, "_FIT") || !buf)
405 return acpi_evaluate_object(handle, path, p, buf);
408 buf->length = sizeof(union acpi_object);
412 EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
414 union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
415 u64 rev, u64 func, union acpi_object *argv4)
417 union acpi_object *obj = ERR_PTR(-ENXIO);
418 struct iomap_ops *ops;
421 ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
423 obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
427 return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
430 EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
432 MODULE_LICENSE("GPL v2");