mm/page_reporting.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/page_reporting.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>

#include "page_reporting.h"
#include "internal.h"

#define PAGE_REPORTING_DELAY    (2 * HZ)
static struct page_reporting_dev_info __rcu *pr_dev_info __read_mostly;

enum {
        PAGE_REPORTING_IDLE = 0,
        PAGE_REPORTING_REQUESTED,
        PAGE_REPORTING_ACTIVE
};
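
/*
 * State machine, as used by the code below: __page_reporting_request() moves
 * IDLE -> REQUESTED and schedules the delayed worker; page_reporting_process()
 * runs its pass as ACTIVE and, once done, either drops back to IDLE or, if a
 * new request arrived in the meantime, reschedules itself.
 */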

/* request page reporting */
static void
__page_reporting_request(struct page_reporting_dev_info *prdev)
{
        unsigned int state;

        /* Check to see if we are in the desired state */
        state = atomic_read(&prdev->state);
        if (state == PAGE_REPORTING_REQUESTED)
                return;

        /*
         * If reporting is already active there is nothing we need to do.
         * Only an IDLE state (0) means the work still needs to be scheduled.
         */
        state = atomic_xchg(&prdev->state, PAGE_REPORTING_REQUESTED);
        if (state != PAGE_REPORTING_IDLE)
                return;

        /*
         * Delay the start of work to allow a sizable queue to build. For
         * now we are limiting this to running no more than once every
         * couple of seconds.
         */
        schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
}

/* notify prdev of free page reporting request */
void __page_reporting_notify(void)
{
        struct page_reporting_dev_info *prdev;

        /*
         * We use RCU to protect the pr_dev_info pointer. In almost all
         * cases it should be present; however, in the unlikely case of
         * a shutdown it will be NULL and we should exit.
         */
        rcu_read_lock();
        prdev = rcu_dereference(pr_dev_info);
        if (likely(prdev))
                __page_reporting_request(prdev);

        rcu_read_unlock();
}
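
/*
 * For context (a sketch, not part of this file): the page allocator does not
 * call __page_reporting_notify() above directly. The companion header
 * mm/page_reporting.h provides a cheap screen along these lines; consult the
 * header for the authoritative version:
 *
 *        static inline void page_reporting_notify_free(unsigned int order)
 *        {
 *                // static key keeps the hot free path cheap when disabled
 *                if (!static_branch_unlikely(&page_reporting_enabled))
 *                        return;
 *
 *                // only orders at or above the reporting threshold matter
 *                if (order < PAGE_REPORTING_MIN_ORDER)
 *                        return;
 *
 *                __page_reporting_notify();
 *        }
 */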

static void
page_reporting_drain(struct page_reporting_dev_info *prdev,
                     struct scatterlist *sgl, unsigned int nents, bool reported)
{
        struct scatterlist *sg = sgl;

        /*
         * Drain the now reported pages back into their respective
         * free lists/areas. We assume at least one page is populated.
         */
        do {
                struct page *page = sg_page(sg);
                int mt = get_pageblock_migratetype(page);
                unsigned int order = get_order(sg->length);

                __putback_isolated_page(page, order, mt);

                /* If the pages were not reported due to an error, skip flagging */
                if (!reported)
                        continue;

                /*
                 * If the page was not commingled with another page we can
                 * consider the result to be "reported" since the page
                 * hasn't been modified, otherwise we will need to
                 * report on the new larger page when we make our way
                 * up to that higher order.
                 */
                if (PageBuddy(page) && page_order(page) == order)
                        __SetPageReported(page);
        } while ((sg = sg_next(sg)));

        /* reinitialize scatterlist now that it is empty */
        sg_init_table(sgl, nents);
}
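
/*
 * Note on the scatterlist encoding: page_reporting_cycle() below sets each
 * entry's length to PAGE_SIZE << order, so get_order(sg->length) in
 * page_reporting_drain() above recovers the buddy order. For example, assuming
 * 4 KiB base pages, an order-9 entry carries a 2 MiB length.
 */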

/*
 * The page reporting cycle consists of 4 stages: fill, report, drain, and
 * idle. We will cycle through the first 3 stages until we cannot obtain a
 * full scatterlist of pages; in that case we will switch to idle.
 */
static int
page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
                     unsigned int order, unsigned int mt,
                     struct scatterlist *sgl, unsigned int *offset)
{
        struct free_area *area = &zone->free_area[order];
        struct list_head *list = &area->free_list[mt];
        unsigned int page_len = PAGE_SIZE << order;
        struct page *page, *next;
        int err = 0;

        /*
         * Perform an early check: if the free area is empty there is
         * nothing to process, so we can skip this free_list.
         */
        if (list_empty(list))
                return err;

        spin_lock_irq(&zone->lock);

        /* loop through free list adding unreported pages to sg list */
        list_for_each_entry_safe(page, next, list, lru) {
                /* We are going to skip over the reported pages. */
                if (PageReported(page))
                        continue;

                /* Attempt to pull page from list */
                if (!__isolate_free_page(page, order))
                        break;

                /* Add page to scatter list */
                --(*offset);
                sg_set_page(&sgl[*offset], page, page_len, 0);

                /* If scatterlist isn't full grab more pages */
                if (*offset)
                        continue;

                /* release lock before waiting on report processing */
                spin_unlock_irq(&zone->lock);

                /* begin processing pages in local list */
                err = prdev->report(prdev, sgl, PAGE_REPORTING_CAPACITY);

                /* reset offset since the full list was reported */
                *offset = PAGE_REPORTING_CAPACITY;

                /* reacquire zone lock and resume processing */
                spin_lock_irq(&zone->lock);

                /* flush reported pages from the sg list */
                page_reporting_drain(prdev, sgl, PAGE_REPORTING_CAPACITY, !err);

                /*
                 * Reset next to the first entry; the old next isn't valid
                 * since we dropped the lock to report the pages.
                 */
                next = list_first_entry(list, struct page, lru);

                /* exit on error */
                if (err)
                        break;
        }

        spin_unlock_irq(&zone->lock);

        return err;
}
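
/*
 * Note on the *offset convention used by page_reporting_cycle() above: the
 * scatterlist is filled from the tail toward the head, with *offset counting
 * down from PAGE_REPORTING_CAPACITY. A partially filled list therefore
 * occupies sgl[*offset] through sgl[PAGE_REPORTING_CAPACITY - 1], which is
 * what page_reporting_process_zone() below relies on when reporting the
 * leftover entries.
 */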

static int
page_reporting_process_zone(struct page_reporting_dev_info *prdev,
                            struct scatterlist *sgl, struct zone *zone)
{
        unsigned int order, mt, leftover, offset = PAGE_REPORTING_CAPACITY;
        unsigned long watermark;
        int err = 0;

        /* Generate minimum watermark to be able to guarantee progress */
        watermark = low_wmark_pages(zone) +
                    (PAGE_REPORTING_CAPACITY << PAGE_REPORTING_MIN_ORDER);
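
        /*
         * Rough worked example (assuming the header's PAGE_REPORTING_CAPACITY
         * of 32 and a pageblock_order of 9 on a 4 KiB-page system): the
         * second term is 32 << 9 = 16384 pages, i.e. 64 MiB of headroom
         * above the low watermark before we are willing to pull pages.
         */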

        /*
         * Cancel the request if there is insufficient free memory in the
         * zone to guarantee forward progress.
         */
        if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
                return err;

        /* Process each free list starting from lowest order/mt */
        for (order = PAGE_REPORTING_MIN_ORDER; order < MAX_ORDER; order++) {
                for (mt = 0; mt < MIGRATE_TYPES; mt++) {
                        /* We do not pull pages from the isolate free list */
                        if (is_migrate_isolate(mt))
                                continue;

                        err = page_reporting_cycle(prdev, zone, order, mt,
                                                   sgl, &offset);
                        if (err)
                                return err;
                }
        }

        /* report the leftover pages before going idle */
        leftover = PAGE_REPORTING_CAPACITY - offset;
        if (leftover) {
                sgl = &sgl[offset];
                err = prdev->report(prdev, sgl, leftover);

                /* flush any remaining pages out from the last report */
                spin_lock_irq(&zone->lock);
                page_reporting_drain(prdev, sgl, leftover, !err);
                spin_unlock_irq(&zone->lock);
        }

        return err;
}

static void page_reporting_process(struct work_struct *work)
{
        struct delayed_work *d_work = to_delayed_work(work);
        struct page_reporting_dev_info *prdev =
                container_of(d_work, struct page_reporting_dev_info, work);
        int err = 0, state = PAGE_REPORTING_ACTIVE;
        struct scatterlist *sgl;
        struct zone *zone;

        /*
         * Change the state to "Active" so that we can track whether anyone
         * requests page reporting after we complete our pass. If the state
         * is not altered by the end of the pass we will switch to idle and
         * quit scheduling reporting runs.
         */
        atomic_set(&prdev->state, state);

        /* allocate scatterlist to store pages being reported on */
        sgl = kmalloc_array(PAGE_REPORTING_CAPACITY, sizeof(*sgl), GFP_KERNEL);
        if (!sgl)
                goto err_out;

        sg_init_table(sgl, PAGE_REPORTING_CAPACITY);

        for_each_zone(zone) {
                err = page_reporting_process_zone(prdev, sgl, zone);
                if (err)
                        break;
        }

        kfree(sgl);
err_out:
        /*
         * If the state has reverted back to requested then there may be
         * additional pages to be processed. We will defer for 2s to allow
         * more pages to accumulate.
         */
        state = atomic_cmpxchg(&prdev->state, state, PAGE_REPORTING_IDLE);
        if (state == PAGE_REPORTING_REQUESTED)
                schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
}

static DEFINE_MUTEX(page_reporting_mutex);
DEFINE_STATIC_KEY_FALSE(page_reporting_enabled);

int page_reporting_register(struct page_reporting_dev_info *prdev)
{
        int err = 0;

        mutex_lock(&page_reporting_mutex);

        /* nothing to do if already in use */
        if (rcu_access_pointer(pr_dev_info)) {
                err = -EBUSY;
                goto err_out;
        }

        /* initialize state and work structures */
        atomic_set(&prdev->state, PAGE_REPORTING_IDLE);
        INIT_DELAYED_WORK(&prdev->work, &page_reporting_process);

        /* Begin initial flush of zones */
        __page_reporting_request(prdev);

        /* Assign device to allow notifications */
        rcu_assign_pointer(pr_dev_info, prdev);

        /* enable page reporting notification */
        if (!static_key_enabled(&page_reporting_enabled)) {
                static_branch_enable(&page_reporting_enabled);
                pr_info("Free page reporting enabled\n");
        }
err_out:
        mutex_unlock(&page_reporting_mutex);

        return err;
}
EXPORT_SYMBOL_GPL(page_reporting_register);

void page_reporting_unregister(struct page_reporting_dev_info *prdev)
{
        mutex_lock(&page_reporting_mutex);

        if (rcu_access_pointer(pr_dev_info) == prdev) {
                /* Disable page reporting notification */
                RCU_INIT_POINTER(pr_dev_info, NULL);
                synchronize_rcu();

                /* Flush any existing work, and lock it out */
                cancel_delayed_work_sync(&prdev->work);
        }

        mutex_unlock(&page_reporting_mutex);
}
EXPORT_SYMBOL_GPL(page_reporting_unregister);
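
/*
 * Example usage (a sketch, not part of this file): a device driver that wants
 * free pages reported to it supplies a report() callback and registers a
 * page_reporting_dev_info. hint_range() below is a stand-in for whatever
 * device-specific mechanism actually consumes the ranges; error handling is
 * elided.
 *
 *        static int my_report(struct page_reporting_dev_info *prdev,
 *                             struct scatterlist *sgl, unsigned int nents)
 *        {
 *                struct scatterlist *sg;
 *                int i;
 *
 *                // pass each isolated range to the device/hypervisor
 *                for_each_sg(sgl, sg, nents, i)
 *                        hint_range(page_to_phys(sg_page(sg)), sg->length);
 *
 *                return 0;
 *        }
 *
 *        static struct page_reporting_dev_info my_prdev = {
 *                .report = my_report,
 *        };
 *
 *        // typically from the driver's probe path:
 *        err = page_reporting_register(&my_prdev);
 *        ...
 *        // and on removal:
 *        page_reporting_unregister(&my_prdev);
 */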