projects
/
linux-2.6-microblaze.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
dell-led: use dell_smbios_send_request() for performing SMBIOS calls
[linux-2.6-microblaze.git]
/
mm
/
page_isolation.c
diff --git
a/mm/page_isolation.c
b/mm/page_isolation.c
index
4568fd5
..
92c4c36
100644
(file)
--- a/
mm/page_isolation.c
+++ b/
mm/page_isolation.c
@@
-9,6
+9,9
@@
#include <linux/hugetlb.h>
#include "internal.h"
#include <linux/hugetlb.h>
#include "internal.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/page_isolation.h>
+
static int set_migratetype_isolate(struct page *page,
bool skip_hwpoisoned_pages)
{
static int set_migratetype_isolate(struct page *page,
bool skip_hwpoisoned_pages)
{
@@
-162,8
+165,8
@@
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned long undo_pfn;
struct page *page;
unsigned long undo_pfn;
struct page *page;
-	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
-	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
+	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
+	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
for (pfn = start_pfn;
pfn < end_pfn;
for (pfn = start_pfn;
pfn < end_pfn;
@@
-193,8
+196,10
@@
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
{
unsigned long pfn;
struct page *page;
{
unsigned long pfn;
struct page *page;
- BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
- BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
+
+ BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
+ BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
+
for (pfn = start_pfn;
pfn < end_pfn;
pfn += pageblock_nr_pages) {
for (pfn = start_pfn;
pfn < end_pfn;
pfn += pageblock_nr_pages) {
@@
-212,7
+217,7
@@
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
*
* Returns 1 if all pages in the range are isolated.
*/
*
* Returns 1 if all pages in the range are isolated.
*/
-static int
+static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
bool skip_hwpoisoned_pages)
{
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
bool skip_hwpoisoned_pages)
{
@@
-237,9
+242,8
@@
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
else
break;
}
else
break;
}
- if (pfn < end_pfn)
- return 0;
- return 1;
+
+ return pfn;
}
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
}
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
@@
-248,7
+252,6
@@
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
unsigned long pfn, flags;
struct page *page;
struct zone *zone;
unsigned long pfn, flags;
struct page *page;
struct zone *zone;
- int ret;
/*
* Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
/*
* Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
@@
-266,10
+269,13
@@
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
/* Check all pages are free or marked as ISOLATED */
zone = page_zone(page);
spin_lock_irqsave(&zone->lock, flags);
/* Check all pages are free or marked as ISOLATED */
zone = page_zone(page);
spin_lock_irqsave(&zone->lock, flags);
-	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
+	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
skip_hwpoisoned_pages);
spin_unlock_irqrestore(&zone->lock, flags);
skip_hwpoisoned_pages);
spin_unlock_irqrestore(&zone->lock, flags);
- return ret ? 0 : -EBUSY;
+
+ trace_test_pages_isolated(start_pfn, end_pfn, pfn);
+
+ return pfn < end_pfn ? -EBUSY : 0;
}
struct page *alloc_migrate_target(struct page *page, unsigned long private,
}
struct page *alloc_migrate_target(struct page *page, unsigned long private,