diff --git a/mm/vmscan.c b/mm/vmscan.c
index cbf84e152f043c82b15d754a9f8c2c4c50866488..7f3096137b8a4dc288ba509a98429a1b048bc55f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1679,13 +1679,24 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 
        if (global_reclaim(sc)) {
                free  = zone_page_state(zone, NR_FREE_PAGES);
-               /* If we have very few page cache pages,
-                  force-scan anon pages. */
                if (unlikely(file + free <= high_wmark_pages(zone))) {
+                       /*
+                        * If we have very few page cache pages, force-scan
+                        * anon pages.
+                        */
                        fraction[0] = 1;
                        fraction[1] = 0;
                        denominator = 1;
                        goto out;
+               } else if (!inactive_file_is_low_global(zone)) {
+                       /*
+                        * There is enough inactive page cache, do not
+                        * reclaim anything from the working set right now.
+                        */
+                       fraction[0] = 0;
+                       fraction[1] = 1;
+                       denominator = 1;
+                       goto out;
                }
        }
 
@@ -1752,7 +1763,7 @@ out:
 /* Use reclaim/compaction for costly allocs or under memory pressure */
 static bool in_reclaim_compaction(struct scan_control *sc)
 {
-       if (COMPACTION_BUILD && sc->order &&
+       if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
                        (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
                         sc->priority < DEF_PRIORITY - 2))
                return true;
@@ -2005,7 +2016,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
                        if (zone->all_unreclaimable &&
                                        sc->priority != DEF_PRIORITY)
                                continue;       /* Let kswapd poll it */
-                       if (COMPACTION_BUILD) {
+                       if (IS_ENABLED(CONFIG_COMPACTION)) {
                                /*
                                 * If we already have plenty of memory free for
                                 * compaction in this zone, don't free any more.
@@ -2414,6 +2425,20 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc)
        } while (memcg);
 }
 
+static bool zone_balanced(struct zone *zone, int order,
+                         unsigned long balance_gap, int classzone_idx)
+{
+       if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
+                                   balance_gap, classzone_idx, 0))
+               return false;
+
+       if (IS_ENABLED(CONFIG_COMPACTION) && order &&
+           !compaction_suitable(zone, order))
+               return false;
+
+       return true;
+}
+
 /*
  * pgdat_balanced is used when checking if a node is balanced for high-order
  * allocations. Only zones that meet watermarks and are in a zone allowed
@@ -2492,8 +2517,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
                        continue;
                }
 
-               if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
-                                                       i, 0))
+               if (!zone_balanced(zone, order, 0, i))
                        all_zones_ok = false;
                else
                        balanced += zone->present_pages;
@@ -2602,8 +2626,7 @@ loop_again:
                                break;
                        }
 
-                       if (!zone_watermark_ok_safe(zone, order,
-                                       high_wmark_pages(zone), 0, 0)) {
+                       if (!zone_balanced(zone, order, 0, 0)) {
                                end_zone = i;
                                break;
                        } else {
@@ -2673,15 +2696,14 @@ loop_again:
                         * Do not reclaim more than needed for compaction.
                         */
                        testorder = order;
-                       if (COMPACTION_BUILD && order &&
+                       if (IS_ENABLED(CONFIG_COMPACTION) && order &&
                                        compaction_suitable(zone, order) !=
                                                COMPACT_SKIPPED)
                                testorder = 0;
 
                        if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
-                                   !zone_watermark_ok_safe(zone, testorder,
-                                       high_wmark_pages(zone) + balance_gap,
-                                       end_zone, 0)) {
+                           !zone_balanced(zone, testorder,
+                                          balance_gap, end_zone)) {
                                shrink_zone(zone, &sc);
 
                                reclaim_state->reclaimed_slab = 0;
@@ -2708,8 +2730,7 @@ loop_again:
                                continue;
                        }
 
-                       if (!zone_watermark_ok_safe(zone, testorder,
-                                       high_wmark_pages(zone), end_zone, 0)) {
+                       if (!zone_balanced(zone, testorder, 0, end_zone)) {
                                all_zones_ok = 0;
                                /*
                                 * We are still under min water mark.  This
@@ -2814,29 +2835,10 @@ out:
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone->all_unreclaimable &&
-                           sc.priority != DEF_PRIORITY)
-                               continue;
-
-                       /* Would compaction fail due to lack of free memory? */
-                       if (COMPACTION_BUILD &&
-                           compaction_suitable(zone, order) == COMPACT_SKIPPED)
-                               goto loop_again;
-
-                       /* Confirm the zone is balanced for order-0 */
-                       if (!zone_watermark_ok(zone, 0,
-                                       high_wmark_pages(zone), 0, 0)) {
-                               order = sc.order = 0;
-                               goto loop_again;
-                       }
-
                        /* Check if the memory needs to be defragmented. */
                        if (zone_watermark_ok(zone, order,
                                    low_wmark_pages(zone), *classzone_idx, 0))
                                zones_need_compaction = 0;
-
-                       /* If balanced, clear the congested flag */
-                       zone_clear_flag(zone, ZONE_CONGESTED);
                }
 
                if (zones_need_compaction)
@@ -2961,7 +2963,7 @@ static int kswapd(void *p)
        classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
        balanced_classzone_idx = classzone_idx;
        for ( ; ; ) {
-               int ret;
+               bool ret;
 
                /*
                 * If the last balance_pgdat was unsuccessful it's unlikely a
@@ -3129,7 +3131,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
        int nid;
 
        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
-               for_each_node_state(nid, N_HIGH_MEMORY) {
+               for_each_node_state(nid, N_MEMORY) {
                        pg_data_t *pgdat = NODE_DATA(nid);
                        const struct cpumask *mask;
 
@@ -3185,7 +3187,7 @@ static int __init kswapd_init(void)
        int nid;
 
        swap_setup();
-       for_each_node_state(nid, N_HIGH_MEMORY)
+       for_each_node_state(nid, N_MEMORY)
                kswapd_run(nid);
        hotcpu_notifier(cpu_callback, 0);
        return 0;