// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/page_reporting.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>

#include "page_reporting.h"
#include "internal.h"

#define PAGE_REPORTING_DELAY	(2 * HZ)
static struct page_reporting_dev_info __rcu *pr_dev_info __read_mostly;

enum {
	PAGE_REPORTING_IDLE = 0,
	PAGE_REPORTING_REQUESTED,
	PAGE_REPORTING_ACTIVE
};

/* request page reporting */
static void
__page_reporting_request(struct page_reporting_dev_info *prdev)
{
	unsigned int state;

	/* Check to see if we are in desired state */
	state = atomic_read(&prdev->state);
	if (state == PAGE_REPORTING_REQUESTED)
		return;

	/*
	 * If reporting is already active there is nothing we need to do.
	 * Test against 0 as that represents PAGE_REPORTING_IDLE.
	 */
	state = atomic_xchg(&prdev->state, PAGE_REPORTING_REQUESTED);
	if (state != PAGE_REPORTING_IDLE)
		return;

	/*
	 * Delay the start of work to allow a sizable queue to build. For
	 * now we are limiting this to running no more than once every
	 * couple of seconds.
	 */
	schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
}
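/*
 * Illustrative summary of the state machine implemented by the atomics
 * above and the cmpxchg in page_reporting_process() (not itself code):
 *
 *	IDLE ---xchg---> REQUESTED ---worker starts---> ACTIVE
 *	  ^                  ^                             |
 *	  |                  '----- new request while -----'
 *	  |                         the pass is running
 *	  '------ final cmpxchg finds ACTIVE unchanged
 *
 * Only the IDLE -> REQUESTED transition schedules the delayed work, so
 * any number of concurrent notifications collapse into a single run.
 */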
/* notify prdev of free page reporting request */
void __page_reporting_notify(void)
{
	struct page_reporting_dev_info *prdev;

	/*
	 * We use RCU to protect the pr_dev_info pointer. In almost all
	 * cases this should be present, however in the unlikely case of
	 * a shutdown this will be NULL and we should exit.
	 */
	rcu_read_lock();
	prdev = rcu_dereference(pr_dev_info);
	if (likely(prdev))
		__page_reporting_request(prdev);
	rcu_read_unlock();
}
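/*
 * For context, a paraphrased sketch of the fast-path screen that sits in
 * front of this function in mm/page_reporting.h; the static key and the
 * order threshold keep the cost near zero while no device is registered:
 *
 *	static inline void page_reporting_notify_free(unsigned int order)
 *	{
 *		if (!static_branch_unlikely(&page_reporting_enabled))
 *			return;
 *		if (order < PAGE_REPORTING_MIN_ORDER)
 *			return;
 *		__page_reporting_notify();
 *	}
 */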
static void
page_reporting_drain(struct page_reporting_dev_info *prdev,
		     struct scatterlist *sgl, unsigned int nents, bool reported)
{
	struct scatterlist *sg = sgl;

	/*
	 * Drain the now reported pages back into their respective
	 * free lists/areas. We assume at least one page is populated.
	 */
	do {
		struct page *page = sg_page(sg);
		int mt = get_pageblock_migratetype(page);
		unsigned int order = get_order(sg->length);

		__putback_isolated_page(page, order, mt);

		/* If the pages were not reported due to an error, skip flagging */
		if (!reported)
			continue;

		/*
		 * If the page was not commingled with another page we can
		 * consider the result to be "reported" since the page
		 * hasn't been modified, otherwise we will need to
		 * report on the new larger page when we make our way
		 * up to that higher order.
		 */
		if (PageBuddy(page) && page_order(page) == order)
			__SetPageReported(page);
	} while ((sg = sg_next(sg)));

	/* reinitialize scatterlist now that it is empty */
	sg_init_table(sgl, nents);
}
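/*
 * Worked example for the commingling check in page_reporting_drain():
 * suppose an order-5 page was reported and, while the zone lock was
 * dropped, its order-5 buddy was freed. On putback the two merge, so
 * PageBuddy()/page_order() no longer match the reported order and the
 * page is left unflagged; the merged order-6 page is then picked up and
 * reported on a later pass.
 */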
/*
 * The page reporting cycle consists of 4 stages: fill, report, drain, and
 * idle. We will cycle through the first 3 stages until we cannot obtain a
 * full scatterlist of pages, at which point we will switch to idle.
 */
static int
page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
		     unsigned int order, unsigned int mt,
		     struct scatterlist *sgl, unsigned int *offset)
{
	struct free_area *area = &zone->free_area[order];
	struct list_head *list = &area->free_list[mt];
	unsigned int page_len = PAGE_SIZE << order;
	struct page *page, *next;
	int err = 0;

	/*
	 * Perform early check, if free area is empty there is
	 * nothing to process so we can skip this free_list.
	 */
	if (list_empty(list))
		return err;

	spin_lock_irq(&zone->lock);

	/* loop through free list adding unreported pages to sg list */
	list_for_each_entry_safe(page, next, list, lru) {
		/* We are going to skip over the reported pages. */
		if (PageReported(page))
			continue;

		/* Attempt to pull page from list */
		if (!__isolate_free_page(page, order))
			break;

		/* Add page to scatter list, filling it from the tail */
		--(*offset);
		sg_set_page(&sgl[*offset], page, page_len, 0);

		/* If scatterlist isn't full grab more pages */
		if (*offset)
			continue;

		/* release lock before waiting on report processing */
		spin_unlock_irq(&zone->lock);

		/* begin processing pages in local list */
		err = prdev->report(prdev, sgl, PAGE_REPORTING_CAPACITY);

		/* reset offset since the full list was reported */
		*offset = PAGE_REPORTING_CAPACITY;

		/* reacquire zone lock and resume processing */
		spin_lock_irq(&zone->lock);

		/* flush reported pages from the sg list */
		page_reporting_drain(prdev, sgl, PAGE_REPORTING_CAPACITY, !err);

		/*
		 * Reset next to first entry, the old next isn't valid
		 * since we dropped the lock to report the pages
		 */
		next = list_first_entry(list, struct page, lru);

		/* exit on error */
		if (err)
			break;
	}

	spin_unlock_irq(&zone->lock);

	return err;
}
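/*
 * Illustrative trace of the offset accounting above (capacity shown as 4
 * for brevity; the real PAGE_REPORTING_CAPACITY is defined in
 * page_reporting.h):
 *
 *	*offset = 4;			// sgl[] starts empty
 *	sg_set_page(&sgl[3], ...);	// first isolated page
 *	sg_set_page(&sgl[2], ...);	// second
 *	...
 *	// *offset == 0: sgl[0..3] is full -> report, drain, reset to 4
 *
 * Filling from the tail means a partially filled list always occupies
 * sgl[*offset..capacity - 1], which is what allows
 * page_reporting_process_zone() to flush the leftovers via &sgl[offset].
 */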
static int
page_reporting_process_zone(struct page_reporting_dev_info *prdev,
			    struct scatterlist *sgl, struct zone *zone)
{
	unsigned int order, mt, leftover, offset = PAGE_REPORTING_CAPACITY;
	unsigned long watermark;
	int err = 0;

	/* Generate minimum watermark to be able to guarantee progress */
	watermark = low_wmark_pages(zone) +
		    (PAGE_REPORTING_CAPACITY << PAGE_REPORTING_MIN_ORDER);

	/*
	 * Cancel request if insufficient free memory or if we failed
	 * to allocate page reporting statistics for the zone.
	 */
	if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
		return err;
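	/*
	 * Illustrative arithmetic for the guard above (values are
	 * configuration-dependent): with PAGE_REPORTING_CAPACITY = 32 and
	 * PAGE_REPORTING_MIN_ORDER equal to pageblock order 9 (4 KiB base
	 * pages, 2 MiB huge pages), the required headroom over the low
	 * watermark is 32 << 9 = 16384 pages, i.e. 64 MiB that could be
	 * isolated at any one time.
	 */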
	/* Process each free list starting from lowest order/mt */
	for (order = PAGE_REPORTING_MIN_ORDER; order < MAX_ORDER; order++) {
		for (mt = 0; mt < MIGRATE_TYPES; mt++) {
			/* We do not pull pages from the isolate free list */
			if (is_migrate_isolate(mt))
				continue;

			err = page_reporting_cycle(prdev, zone, order, mt,
						   sgl, &offset);
			if (err)
				return err;
		}
	}

	/* report the leftover pages before going idle */
	leftover = PAGE_REPORTING_CAPACITY - offset;
	if (leftover) {
		sgl = &sgl[offset];
		err = prdev->report(prdev, sgl, leftover);

		/* flush any remaining pages out from the last report */
		spin_lock_irq(&zone->lock);
		page_reporting_drain(prdev, sgl, leftover, !err);
		spin_unlock_irq(&zone->lock);
	}

	return err;
}
static void page_reporting_process(struct work_struct *work)
{
	struct delayed_work *d_work = to_delayed_work(work);
	struct page_reporting_dev_info *prdev =
		container_of(d_work, struct page_reporting_dev_info, work);
	int err = 0, state = PAGE_REPORTING_ACTIVE;
	struct scatterlist *sgl;
	struct zone *zone;

	/*
	 * Change the state to "Active" so that we can track if anyone
	 * requests page reporting after we complete our pass. If the
	 * state is not altered by the end of the pass we will switch
	 * to idle and quit scheduling reporting runs.
	 */
	atomic_set(&prdev->state, state);

	/* allocate scatterlist to store pages being reported on */
	sgl = kmalloc_array(PAGE_REPORTING_CAPACITY, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		goto err_out;

	sg_init_table(sgl, PAGE_REPORTING_CAPACITY);

	for_each_zone(zone) {
		err = page_reporting_process_zone(prdev, sgl, zone);
		if (err)
			break;
	}

	kfree(sgl);
err_out:
	/*
	 * If the state has reverted back to requested then there may be
	 * additional pages to be processed. We will defer for 2s to allow
	 * more pages to accumulate.
	 */
	state = atomic_cmpxchg(&prdev->state, state, PAGE_REPORTING_IDLE);
	if (state == PAGE_REPORTING_REQUESTED)
		schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
}
static DEFINE_MUTEX(page_reporting_mutex);
DEFINE_STATIC_KEY_FALSE(page_reporting_enabled);

int page_reporting_register(struct page_reporting_dev_info *prdev)
{
	int err = 0;

	mutex_lock(&page_reporting_mutex);

	/* nothing to do if already in use */
	if (rcu_access_pointer(pr_dev_info)) {
		err = -EBUSY;
		goto err_out;
	}

	/* initialize state and work structures */
	atomic_set(&prdev->state, PAGE_REPORTING_IDLE);
	INIT_DELAYED_WORK(&prdev->work, &page_reporting_process);

	/* Begin initial flush of zones */
	__page_reporting_request(prdev);

	/* Assign device to allow notifications */
	rcu_assign_pointer(pr_dev_info, prdev);

	/* enable page reporting notification */
	if (!static_key_enabled(&page_reporting_enabled)) {
		static_branch_enable(&page_reporting_enabled);
		pr_info("Free page reporting enabled\n");
	}
err_out:
	mutex_unlock(&page_reporting_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(page_reporting_register);
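/*
 * A minimal usage sketch (hypothetical names; virtio-balloon is the
 * in-tree consumer of this interface). The report callback runs from the
 * reporting worker with the pages already isolated from the free lists:
 *
 *	static int my_report(struct page_reporting_dev_info *prdev,
 *			     struct scatterlist *sgl, unsigned int nents)
 *	{
 *		// hand the ranges in sgl to the hypervisor; returning 0
 *		// marks the pages reported once they are drained back
 *		return 0;
 *	}
 *
 *	static struct page_reporting_dev_info my_prdev = {
 *		.report = my_report,
 *	};
 *
 *	err = page_reporting_register(&my_prdev);
 *	...
 *	page_reporting_unregister(&my_prdev);
 */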
void page_reporting_unregister(struct page_reporting_dev_info *prdev)
{
	mutex_lock(&page_reporting_mutex);

	if (rcu_access_pointer(pr_dev_info) == prdev) {
		/* Disable page reporting notification */
		RCU_INIT_POINTER(pr_dev_info, NULL);
		synchronize_rcu();

		/* Flush any existing work, and lock it out */
		cancel_delayed_work_sync(&prdev->work);
	}

	mutex_unlock(&page_reporting_mutex);
}
EXPORT_SYMBOL_GPL(page_reporting_unregister);