mm/compaction: account compound-page tail PFNs in nr_scanned and update MAX_ORDER bound checks [NOTE(review): the original subject line here ("splice: Check for zero count in vfs_splice_read()") belongs to an unrelated commit and does not match this diff — verify against the real commit log]
[linux-2.6-microblaze.git] / mm / compaction.c
index 9ff7123..c8bcdea 100644 (file)
@@ -583,9 +583,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                if (PageCompound(page)) {
                        const unsigned int order = compound_order(page);
 
-                       if (likely(order < MAX_ORDER)) {
+                       if (likely(order <= MAX_ORDER)) {
                                blockpfn += (1UL << order) - 1;
                                cursor += (1UL << order) - 1;
+                               nr_scanned += (1UL << order) - 1;
                        }
                        goto isolate_fail;
                }
@@ -893,6 +894,11 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                }
 
                if (PageHuge(page) && cc->alloc_contig) {
+                       if (locked) {
+                               unlock_page_lruvec_irqrestore(locked, flags);
+                               locked = NULL;
+                       }
+
                        ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);
 
                        /*
@@ -904,6 +910,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                                if (ret == -EBUSY)
                                        ret = 0;
                                low_pfn += compound_nr(page) - 1;
+                               nr_scanned += compound_nr(page) - 1;
                                goto isolate_fail;
                        }
 
@@ -938,8 +945,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                         * a valid page order. Consider only values in the
                         * valid order range to prevent low_pfn overflow.
                         */
-                       if (freepage_order > 0 && freepage_order < MAX_ORDER)
+                       if (freepage_order > 0 && freepage_order <= MAX_ORDER) {
                                low_pfn += (1UL << freepage_order) - 1;
+                               nr_scanned += (1UL << freepage_order) - 1;
+                       }
                        continue;
                }
 
@@ -954,8 +963,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                if (PageCompound(page) && !cc->alloc_contig) {
                        const unsigned int order = compound_order(page);
 
-                       if (likely(order < MAX_ORDER))
+                       if (likely(order <= MAX_ORDER)) {
                                low_pfn += (1UL << order) - 1;
+                               nr_scanned += (1UL << order) - 1;
+                       }
                        goto isolate_fail;
                }
 
@@ -1077,6 +1088,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                         */
                        if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
                                low_pfn += compound_nr(page) - 1;
+                               nr_scanned += compound_nr(page) - 1;
                                SetPageLRU(page);
                                goto isolate_fail_put;
                        }
@@ -2131,7 +2143,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 
        /* Direct compactor: Is a suitable page free? */
        ret = COMPACT_NO_SUITABLE_PAGE;
-       for (order = cc->order; order < MAX_ORDER; order++) {
+       for (order = cc->order; order <= MAX_ORDER; order++) {
                struct free_area *area = &cc->zone->free_area[order];
                bool can_steal;