diff --git a/mm/compaction.c b/mm/compaction.c
index f58bcd016f432dd094d6f6378aa53f9799d30811..918577595ea8695298cd21ecab7acaca35b1eb92 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -251,7 +251,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 {
        int nr_scanned = 0, total_isolated = 0;
        struct page *cursor, *valid_page = NULL;
-       unsigned long nr_strict_required = end_pfn - blockpfn;
        unsigned long flags;
        bool locked = false;
 
@@ -264,11 +263,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                nr_scanned++;
                if (!pfn_valid_within(blockpfn))
-                       continue;
+                       goto isolate_fail;
+
                if (!valid_page)
                        valid_page = page;
                if (!PageBuddy(page))
-                       continue;
+                       goto isolate_fail;
 
                /*
                 * The zone lock must be held to isolate freepages.
@@ -289,12 +289,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                /* Recheck this is a buddy page under lock */
                if (!PageBuddy(page))
-                       continue;
+                       goto isolate_fail;
 
                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
-               if (!isolated && strict)
-                       break;
                total_isolated += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
@@ -305,7 +303,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                if (isolated) {
                        blockpfn += isolated - 1;
                        cursor += isolated - 1;
+                       continue;
                }
+
+isolate_fail:
+               if (strict)
+                       break;
+               else
+                       continue;
+
        }
 
        trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
@@ -315,7 +321,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
         * pages requested were isolated. If there were any failures, 0 is
         * returned and CMA will fail.
         */
-       if (strict && nr_strict_required > total_isolated)
+       if (strict && blockpfn < end_pfn)
                total_isolated = 0;
 
        if (locked)
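
Taken together, these hunks let the scan position itself encode success for the strict (CMA) case: any failure now breaks out of the loop before blockpfn reaches end_pfn, which is why the nr_strict_required counter can go away. A minimal standalone model of the new control flow (illustrative names, not the kernel code):

#include <assert.h>
#include <stdbool.h>

/* Toy scan of pfns [pfn, end): 'fail_pfn' marks a page that cannot be
 * isolated. Strict callers stop at the first failure, so reaching 'end'
 * proves the whole range was isolated. */
static unsigned long scan(unsigned long pfn, unsigned long end,
			  unsigned long fail_pfn, bool strict)
{
	unsigned long isolated = 0;

	for (; pfn < end; pfn++) {
		if (pfn != fail_pfn) {
			isolated++;
			continue;
		}
		/* isolate_fail: */
		if (strict)
			break;
	}
	if (strict && pfn < end)	/* the new completion check */
		isolated = 0;
	return isolated;
}

int main(void)
{
	assert(scan(256, 512, (unsigned long)-1, true) == 256); /* clean range */
	assert(scan(256, 512, 300, true) == 0);    /* CMA: all or nothing */
	assert(scan(256, 512, 300, false) == 255); /* compaction keeps going */
	return 0;
}
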
@@ -459,6 +465,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
        unsigned long flags;
        bool locked = false;
        struct page *page = NULL, *valid_page = NULL;
+       bool skipped_async_unsuitable = false;
 
        /*
         * Ensure that there are not too many pages isolated from the LRU
@@ -522,7 +529,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                if (!isolation_suitable(cc, page))
                        goto next_pageblock;
 
-               /* Skip if free */
+               /*
+                * Skip if free. page_order cannot be used without zone->lock
+                * as nothing prevents parallel allocations or buddy merging.
+                */
                if (PageBuddy(page))
                        continue;
 
@@ -534,6 +544,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                if (!cc->sync && last_pageblock_nr != pageblock_nr &&
                    !migrate_async_suitable(get_pageblock_migratetype(page))) {
                        cc->finished_update_migrate = true;
+                       skipped_async_unsuitable = true;
                        goto next_pageblock;
                }
 
@@ -599,7 +610,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                if (__isolate_lru_page(page, mode) != 0)
                        continue;
 
-               VM_BUG_ON(PageTransCompound(page));
+               VM_BUG_ON_PAGE(PageTransCompound(page), page);
 
                /* Successfully isolated */
                cc->finished_update_migrate = true;
@@ -627,8 +638,13 @@ next_pageblock:
        if (locked)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-       /* Update the pageblock-skip if the whole pageblock was scanned */
-       if (low_pfn == end_pfn)
+       /*
+        * Update the pageblock-skip information and cached scanner pfn,
+        * if the whole pageblock was scanned without isolating any page.
+        * This is not done when pageblock was skipped due to being unsuitable
+        * for async compaction, so that eventual sync compaction can try.
+        */
+       if (low_pfn == end_pfn && !skipped_async_unsuitable)
                update_pageblock_skip(cc, valid_page, nr_isolated, true);
 
        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
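
The gating is worth spelling out: the skip hint is persistent, so recording it for a pageblock that async compaction merely declined to scan would also hide that block from a later sync pass. A toy model of the new test (assumed pfn values, illustrative only):

#include <assert.h>
#include <stdbool.h>

/* The skip hint may be recorded only when the whole pageblock was truly
 * scanned; a block skipped as async-unsuitable stays eligible for sync. */
static bool should_set_skip_hint(unsigned long low_pfn, unsigned long end_pfn,
				 bool skipped_async_unsuitable)
{
	return low_pfn == end_pfn && !skipped_async_unsuitable;
}

int main(void)
{
	assert(should_set_skip_hint(1024, 1024, false));  /* fully scanned */
	assert(!should_set_skip_hint(1024, 1024, true));  /* async skipped it */
	assert(!should_set_skip_hint(900, 1024, false));  /* aborted early */
	return 0;
}
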
@@ -660,7 +676,7 @@ static void isolate_freepages(struct zone *zone,
         * is the end of the pageblock the migration scanner is using.
         */
        pfn = cc->free_pfn;
-       low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+       low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
 
        /*
         * Take care that if the migration scanner is at the end of the zone
@@ -676,7 +692,7 @@ static void isolate_freepages(struct zone *zone,
         * pages on cc->migratepages. We stop searching if the migrate
         * and free page scanners meet or enough free pages are isolated.
         */
-       for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+       for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;
 
@@ -738,7 +754,14 @@ static void isolate_freepages(struct zone *zone,
        /* split_free_page does not map the pages */
        map_pages(freelist);
 
-       cc->free_pfn = high_pfn;
+       /*
+        * If we crossed the migrate scanner, we want to keep it that way
+        * so that compact_finished() may detect this
+        */
+       if (pfn < low_pfn)
+               cc->free_pfn = max(pfn, zone->zone_start_pfn);
+       else
+               cc->free_pfn = high_pfn;
        cc->nr_freepages = nr_freepages;
 }
 
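Caching the crossed position (clamped to the zone start, since pfn can step below it) rather than high_pfn is what lets compact_finished() observe free_pfn <= migrate_pfn on its next check. A toy model of that hand-off (illustrative, not the kernel function):

#include <assert.h>

/* What the free scanner caches after its loop exits. */
static unsigned long cache_free_pfn(unsigned long pfn, unsigned long low_pfn,
				    unsigned long zone_start_pfn,
				    unsigned long high_pfn)
{
	if (pfn < low_pfn)	/* crossed the migrate scanner */
		return pfn > zone_start_pfn ? pfn : zone_start_pfn;
	return high_pfn;	/* normal exit: highest block with free pages */
}

int main(void)
{
	/* Scanners met: keep the crossed (clamped) pfn so that
	 * compact_finished() sees free_pfn <= migrate_pfn and completes. */
	assert(cache_free_pfn(128, 1024, 256, 4096) == 256);
	/* Scanners did not meet: cache the usual position. */
	assert(cache_free_pfn(1536, 1024, 256, 4096) == 4096);
	return 0;
}
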
@@ -837,6 +860,10 @@ static int compact_finished(struct zone *zone,
 
        /* Compaction run completes if the migrate and free scanner meet */
        if (cc->free_pfn <= cc->migrate_pfn) {
+               /* Let the next compaction start anew. */
+               zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
+               zone->compact_cached_free_pfn = zone_end_pfn(zone);
+
                /*
                 * Mark that the PG_migrate_skip information should be cleared
                 * by kswapd when it goes to sleep. kswapd does not set the
@@ -946,6 +973,14 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                ;
        }
 
+       /*
+        * Clear pageblock skip if there were failures recently and compaction
+        * is about to be retried after being deferred. kswapd does not do
+        * this reset as it'll reset the cached information when going to sleep.
+        */
+       if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+               __reset_isolation_suitable(zone);
+
        /*
         * Setup to move all movable pages to the end of the zone. Used cached
         * information on where the scanners should start but check that it
@@ -962,13 +997,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                zone->compact_cached_migrate_pfn = cc->migrate_pfn;
        }
 
-       /*
-        * Clear pageblock skip if there were failures recently and compaction
-        * is about to be retried after being deferred. kswapd does not do
-        * this reset as it'll reset the cached information when going to sleep.
-        */
-       if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
-               __reset_isolation_suitable(zone);
+       trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
 
        migrate_prep_local();
 
@@ -1003,7 +1032,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                if (err) {
                        putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
-                       if (err == -ENOMEM) {
+                       /*
+                        * migrate_pages() may return -ENOMEM when scanners meet
+                        * and we want compact_finished() to detect it
+                        */
+                       if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
                                ret = COMPACT_PARTIAL;
                                goto out;
                        }
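
The extra free_pfn > migrate_pfn test distinguishes two sources of -ENOMEM: a genuine out-of-memory condition while the scanners are still apart (worth aborting as COMPACT_PARTIAL), versus isolate_freepages() coming up empty because the scanners met (fall through, so compact_finished() can report completion). A small model of the test:

#include <assert.h>
#include <errno.h>
#include <stdbool.h>

/* Abort as COMPACT_PARTIAL only for a real OOM, i.e. -ENOMEM while the
 * free scanner is still above the migrate scanner. */
static bool abort_as_partial(int err, unsigned long free_pfn,
			     unsigned long migrate_pfn)
{
	return err == -ENOMEM && free_pfn > migrate_pfn;
}

int main(void)
{
	assert(abort_as_partial(-ENOMEM, 4096, 1024));	/* scanners apart */
	assert(!abort_as_partial(-ENOMEM, 1024, 1024));	/* scanners met */
	assert(!abort_as_partial(-EAGAIN, 4096, 1024));	/* other errors */
	return 0;
}
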
@@ -1015,6 +1048,8 @@ out:
        cc->nr_freepages -= release_freepages(&cc->freepages);
        VM_BUG_ON(cc->nr_freepages != 0);
 
+       trace_mm_compaction_end(ret);
+
        return ret;
 }
 
@@ -1120,12 +1155,11 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                        compact_zone(zone, cc);
 
                if (cc->order > 0) {
-                       int ok = zone_watermark_ok(zone, cc->order,
-                                               low_wmark_pages(zone), 0, 0);
-                       if (ok && cc->order >= zone->compact_order_failed)
-                               zone->compact_order_failed = cc->order + 1;
+                       if (zone_watermark_ok(zone, cc->order,
+                                               low_wmark_pages(zone), 0, 0))
+                               compaction_defer_reset(zone, cc->order, false);
                        /* Currently async compaction is never deferred. */
-                       else if (!ok && cc->sync)
+                       else if (cc->sync)
                                defer_compaction(zone, cc->order);
                }