// SPDX-License-Identifier: GPL-2.0
/*
 * A test for the patch "Allow compaction of unevictable pages".
 * With this patch we should be able to allocate at least 1/4
 * of RAM in huge pages. Without the patch much less is
 * allocated.
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>

#include "../kselftest.h"

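/* Each fragmentation mapping is a 1 MiB chunk of locked anonymous memory. */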
#define MAP_SIZE 1048576

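/* Node of the singly linked list used to track every mapping created in main(). */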
struct map_list {
        void *map;
        struct map_list *next;
};

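/*
 * Read MemFree and Hugepagesize (both reported in kB) from /proc/meminfo,
 * using small shell pipelines run through popen().
 */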
int read_memory_info(unsigned long *memfree, unsigned long *hugepagesize)
{
        char  buffer[256] = {0};
        char *cmd = "cat /proc/meminfo | grep -i memfree | grep -o '[0-9]*'";
        FILE *cmdfile = popen(cmd, "r");

        if (!cmdfile) {
                perror("Failed to run meminfo command");
                return -1;
        }

        if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
                perror("Failed to read meminfo");
                pclose(cmdfile);
                return -1;
        }

        pclose(cmdfile);

        *memfree = atoll(buffer);
        cmd = "cat /proc/meminfo | grep -i hugepagesize | grep -o '[0-9]*'";
        cmdfile = popen(cmd, "r");

        if (!cmdfile) {
                perror("Failed to run meminfo command");
                return -1;
        }

        if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
                perror("Failed to read meminfo");
                pclose(cmdfile);
                return -1;
        }

        pclose(cmdfile);
        *hugepagesize = atoll(buffer);

        return 0;
}

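/*
 * The test only makes sense when the compact_unevictable_allowed sysctl is
 * enabled; succeed only if it reads back as '1'.
 */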
int prereq(void)
{
        char allowed;
        int fd;

        fd = open("/proc/sys/vm/compact_unevictable_allowed",
                  O_RDONLY | O_NONBLOCK);
        if (fd < 0) {
                perror("Failed to open /proc/sys/vm/compact_unevictable_allowed");
                return -1;
        }

        if (read(fd, &allowed, sizeof(char)) != sizeof(char)) {
                perror("Failed to read from /proc/sys/vm/compact_unevictable_allowed");
                close(fd);
                return -1;
        }

        close(fd);
        if (allowed == '1')
                return 0;

        return -1;
}

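/*
 * Reset nr_hugepages to zero, request a huge number of huge pages and read
 * back how many the kernel managed to allocate from the fragmented free
 * memory. The test passes if at least a third of that memory (scaled down to
 * 80% of MemFree to avoid the OOM killer) ended up in huge pages; the
 * original nr_hugepages value is restored before returning.
 */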
int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
{
        int fd;
        int compaction_index = 0;
        int nr_hugepages_allocated;
        char initial_nr_hugepages[10] = {0};
        char nr_hugepages[10] = {0};

        /* We want to test with 80% of available memory. Else, OOM killer comes
           in to play */
        mem_free = mem_free * 0.8;

        fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
        if (fd < 0) {
                perror("Failed to open /proc/sys/vm/nr_hugepages");
                return -1;
        }

        if (read(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) <= 0) {
                perror("Failed to read from /proc/sys/vm/nr_hugepages");
                goto close_fd;
        }

        /* Start with the initial condition of 0 huge pages */
        if (write(fd, "0", sizeof(char)) != sizeof(char)) {
                perror("Failed to write 0 to /proc/sys/vm/nr_hugepages");
                goto close_fd;
        }

        lseek(fd, 0, SEEK_SET);

        /* Request a large number of huge pages. The kernel will allocate
           as many as it can */
        if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
                perror("Failed to write 100000 to /proc/sys/vm/nr_hugepages");
                goto close_fd;
        }

        lseek(fd, 0, SEEK_SET);

        if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
                perror("Failed to re-read from /proc/sys/vm/nr_hugepages");
                goto close_fd;
        }

        /* We should have been able to request at least 1/3rd of the memory in
           huge pages */
        nr_hugepages_allocated = atoi(nr_hugepages);
        if (nr_hugepages_allocated <= 0) {
                fprintf(stderr, "ERROR: No huge pages could be allocated\n");
                goto close_fd;
        }

        compaction_index = mem_free/(nr_hugepages_allocated * hugepage_size);

        if (compaction_index > 3) {
                printf("No of huge pages allocated = %d\n",
                       nr_hugepages_allocated);
                fprintf(stderr, "ERROR: Less than 1/%d of memory is available "
                        "as huge pages\n", compaction_index);
                goto close_fd;
        }

        printf("No of huge pages allocated = %d\n",
               nr_hugepages_allocated);

        lseek(fd, 0, SEEK_SET);

        if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
            != strlen(initial_nr_hugepages)) {
                perror("Failed to write value to /proc/sys/vm/nr_hugepages");
                goto close_fd;
        }

        close(fd);
        return 0;

 close_fd:
        close(fd);
        printf("Not OK. Compaction test failed.\n");
        return -1;
}

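/*
 * Fragment free memory: mmap() locked 1 MiB chunks until roughly 80% of free
 * memory is pinned, dirty one word per page so KSM cannot merge the pages,
 * then unmap everything again. This leaves free memory fragmented before
 * check_compaction() asks the kernel to assemble huge pages from it.
 */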
int main(int argc, char **argv)
{
        struct rlimit lim;
        struct map_list *list, *entry;
        size_t page_size, i;
        void *map = NULL;
        unsigned long mem_free = 0;
        unsigned long hugepage_size = 0;
        unsigned long mem_fragmentable = 0;

        if (prereq() != 0) {
                printf("Either the sysctl compact_unevictable_allowed is not\n"
                       "set to 1 or couldn't read the proc file.\n"
                       "Skipping the test\n");
                return KSFT_SKIP;
        }

        lim.rlim_cur = RLIM_INFINITY;
        lim.rlim_max = RLIM_INFINITY;
        if (setrlimit(RLIMIT_MEMLOCK, &lim)) {
                perror("Failed to set rlimit");
                return -1;
        }

        page_size = getpagesize();

        list = NULL;

        if (read_memory_info(&mem_free, &hugepage_size) != 0) {
                printf("ERROR: Cannot read meminfo\n");
                return -1;
        }

        /* mem_free is in kB and each mapping is 1 MiB, so this is the number
         * of chunks needed to cover roughly 80% of free memory.
         */
        mem_fragmentable = mem_free * 0.8 / 1024;

        while (mem_fragmentable > 0) {
                map = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
                           MAP_ANONYMOUS | MAP_PRIVATE | MAP_LOCKED, -1, 0);
                if (map == MAP_FAILED)
                        break;

                entry = malloc(sizeof(struct map_list));
                if (!entry) {
                        munmap(map, MAP_SIZE);
                        break;
                }
                entry->map = map;
                entry->next = list;
                list = entry;

                /* Write something (in this case the address of the map) to
                 * ensure that KSM can't merge the mapped pages
                 */
                for (i = 0; i < MAP_SIZE; i += page_size)
                        *(unsigned long *)(map + i) = (unsigned long)map + i;

                mem_fragmentable--;
        }

        /* Unmap every chunk (and free its list node) so the memory is free
         * again, but fragmented.
         */
        while (list) {
                entry = list;
                list = list->next;
                munmap(entry->map, MAP_SIZE);
                free(entry);
        }

        if (check_compaction(mem_free, hugepage_size) == 0)
                return 0;

        return -1;
}