diff --git a/mm/vmscan.c b/mm/vmscan.c
index 463990941a78bdfcaabea5682e2e4336aa3ab39d..88c5fed8b9a4bd9a54021810cf05710cff4fcbfc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -128,7 +128,7 @@ struct scan_control {
  * From 0 .. 100.  Higher means more swappy.
  */
 int vm_swappiness = 60;
-long vm_total_pages;   /* The total number of pages which the VM controls */
+unsigned long vm_total_pages;  /* The total number of pages which the VM controls */
 
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
@@ -1579,16 +1579,6 @@ static inline int inactive_anon_is_low(struct lruvec *lruvec)
 }
 #endif
 
-static int inactive_file_is_low_global(struct zone *zone)
-{
-       unsigned long active, inactive;
-
-       active = zone_page_state(zone, NR_ACTIVE_FILE);
-       inactive = zone_page_state(zone, NR_INACTIVE_FILE);
-
-       return (active > inactive);
-}
-
 /**
  * inactive_file_is_low - check if file pages need to be deactivated
  * @lruvec: LRU vector to check
@@ -1605,10 +1595,13 @@ static int inactive_file_is_low_global(struct zone *zone)
  */
 static int inactive_file_is_low(struct lruvec *lruvec)
 {
-       if (!mem_cgroup_disabled())
-               return mem_cgroup_inactive_file_is_low(lruvec);
+       unsigned long inactive;
+       unsigned long active;
+
+       inactive = get_lru_size(lruvec, LRU_INACTIVE_FILE);
+       active = get_lru_size(lruvec, LRU_ACTIVE_FILE);
 
-       return inactive_file_is_low_global(lruvec_zone(lruvec));
+       return active > inactive;
 }
 
 static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
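Note: the open-coded memcg/global split can go away because get_lru_size() already hides that distinction. For reference, the helper defined earlier in this file looks roughly like the following (a sketch from the same kernel version, not part of this hunk):

	static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
	{
		/* memcg enabled: use the per-memcg LRU accounting */
		if (!mem_cgroup_disabled())
			return mem_cgroup_get_lru_size(lruvec, lru);

		/* global reclaim: read the zone-wide vmstat counter */
		return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
	}

With both cases funneled through it, inactive_file_is_low() reduces to a plain active > inactive comparison.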
@@ -1684,7 +1677,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
                force_scan = true;
 
        /* If we have no swap space, do not bother scanning anon pages. */
-       if (!sc->may_swap || (nr_swap_pages <= 0)) {
+       if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
                scan_balance = SCAN_FILE;
                goto out;
        }
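Note: nr_swap_pages is no longer a plain long that readers load directly; the per-partition swap locking work turns it into an atomic counter behind an accessor. The swap.h side is roughly (a sketch, assumed from the same series):

	extern atomic_long_t nr_swap_pages;

	static inline long get_nr_swap_pages(void)
	{
		/* lock-free snapshot; swapon/swapoff adjust it atomically */
		return atomic_long_read(&nr_swap_pages);
	}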
@@ -1933,7 +1926,7 @@ static inline bool should_continue_reclaim(struct zone *zone,
         */
        pages_for_compaction = (2UL << sc->order);
        inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
-       if (nr_swap_pages > 0)
+       if (get_nr_swap_pages() > 0)
                inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
        if (sc->nr_reclaimed < pages_for_compaction &&
                        inactive_lru_pages > pages_for_compaction)
@@ -2010,7 +2003,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
         * a reasonable chance of completing and allocating the page
         */
        balance_gap = min(low_wmark_pages(zone),
-               (zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+               (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
                        KSWAPD_ZONE_BALANCE_GAP_RATIO);
        watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
        watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
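Note: with KSWAPD_ZONE_BALANCE_GAP_RATIO defined as 100, the gap works out to 1% of the zone, rounded up, capped at the low watermark. For example, a zone with managed_pages = 1,000,000 and a 5,000-page low watermark gives min(5000, (1000000 + 99) / 100) = min(5000, 10000) = 5000 pages of extra headroom. Using managed_pages rather than present_pages keeps bootmem-reserved pages, which reclaim can never recover, out of that 1%. The same computation is updated in balance_pgdat() below.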
@@ -2201,6 +2194,13 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                if (sc->nr_reclaimed >= sc->nr_to_reclaim)
                        goto out;
 
+               /*
+                * If we're having trouble reclaiming, start doing
+                * writepage even in laptop mode.
+                */
+               if (sc->priority < DEF_PRIORITY - 2)
+                       sc->may_writepage = 1;
+
                /*
                 * Try to write back as many pages as we just scanned.  This
                 * tends to cause slow streaming writers to write data to the
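Note: DEF_PRIORITY is 12, so "sc->priority < DEF_PRIORITY - 2" enables writepage once the first three passes (priorities 12, 11 and 10) have failed to reach nr_to_reclaim. The same test replaces the old scan/reclaim-ratio heuristic in balance_pgdat() below, keeping direct reclaim and kswapd on one rule.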
@@ -2352,7 +2352,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 {
        unsigned long nr_reclaimed;
        struct scan_control sc = {
-               .gfp_mask = gfp_mask,
+               .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
                .may_writepage = !laptop_mode,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .may_unmap = 1,
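Note: the embedded assignment writes the filtered mask back into gfp_mask, so later readers of gfp_mask in this function see the same value reclaim actually uses. memalloc_noio_flags() itself is roughly the following (a sketch of the sched.h helper this depends on):

	static inline gfp_t memalloc_noio_flags(gfp_t flags)
	{
		/* callers inside memalloc_noio_save() must not recurse into I/O */
		if (unlikely(current->flags & PF_MEMALLOC_NOIO))
			flags &= ~(__GFP_IO | __GFP_FS);
		return flags;
	}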
@@ -2525,7 +2525,7 @@ static bool zone_balanced(struct zone *zone, int order,
  */
 static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
 {
-       unsigned long present_pages = 0;
+       unsigned long managed_pages = 0;
        unsigned long balanced_pages = 0;
        int i;
 
@@ -2536,7 +2536,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
                if (!populated_zone(zone))
                        continue;
 
-               present_pages += zone->present_pages;
+               managed_pages += zone->managed_pages;
 
                /*
                 * A special case here:
@@ -2546,18 +2546,18 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
                 * they must be considered balanced here as well!
                 */
                if (zone->all_unreclaimable) {
-                       balanced_pages += zone->present_pages;
+                       balanced_pages += zone->managed_pages;
                        continue;
                }
 
                if (zone_balanced(zone, order, 0, i))
-                       balanced_pages += zone->present_pages;
+                       balanced_pages += zone->managed_pages;
                else if (!order)
                        return false;
        }
 
        if (order)
-               return balanced_pages >= (present_pages >> 2);
+               return balanced_pages >= (managed_pages >> 2);
        else
                return true;
 }
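Note: the two exit paths encode different policies: an order-0 wakeup requires every populated zone up to classzone_idx to be balanced, while a high-order wakeup is satisfied once balanced (or unreclaimable) zones cover at least a quarter of the node's managed pages. For example, on a node with zones of 100,000, 400,000 and 500,000 managed pages, balancing only the largest zone already passes the order > 0 check (500,000 >= 1,000,000 >> 2).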
@@ -2616,7 +2616,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
 static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                                                        int *classzone_idx)
 {
-       struct zone *unbalanced_zone;
+       bool pgdat_is_balanced = false;
        int i;
        int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
        unsigned long total_scanned;
@@ -2647,9 +2647,6 @@ loop_again:
 
        do {
                unsigned long lru_pages = 0;
-               int has_under_min_watermark_zone = 0;
-
-               unbalanced_zone = NULL;
 
                /*
                 * Scan in the highmem->dma direction for the highest
@@ -2690,8 +2687,11 @@ loop_again:
                                zone_clear_flag(zone, ZONE_CONGESTED);
                        }
                }
-               if (i < 0)
+
+               if (i < 0) {
+                       pgdat_is_balanced = true;
                        goto out;
+               }
 
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
@@ -2741,7 +2741,7 @@ loop_again:
                         * of the zone, whichever is smaller.
                         */
                        balance_gap = min(low_wmark_pages(zone),
-                               (zone->present_pages +
+                               (zone->managed_pages +
                                        KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
                                KSWAPD_ZONE_BALANCE_GAP_RATIO);
                        /*
@@ -2772,12 +2772,10 @@ loop_again:
                        }
 
                        /*
-                        * If we've done a decent amount of scanning and
-                        * the reclaim ratio is low, start doing writepage
-                        * even in laptop mode
+                        * If we're having trouble reclaiming, start doing
+                        * writepage even in laptop mode.
                         */
-                       if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
-                           total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
+                       if (sc.priority < DEF_PRIORITY - 2)
                                sc.may_writepage = 1;
 
                        if (zone->all_unreclaimable) {
@@ -2786,17 +2784,7 @@ loop_again:
                                continue;
                        }
 
-                       if (!zone_balanced(zone, testorder, 0, end_zone)) {
-                               unbalanced_zone = zone;
-                               /*
-                                * We are still under min water mark.  This
-                                * means that we have a GFP_ATOMIC allocation
-                                * failure risk. Hurry up!
-                                */
-                               if (!zone_watermark_ok_safe(zone, order,
-                                           min_wmark_pages(zone), end_zone, 0))
-                                       has_under_min_watermark_zone = 1;
-                       } else {
+                       if (zone_balanced(zone, testorder, 0, end_zone))
                                /*
                                 * If a zone reaches its high watermark,
                                 * consider it to be no longer congested. It's
@@ -2805,8 +2793,6 @@ loop_again:
                                 * speculatively avoid congestion waits
                                 */
                                zone_clear_flag(zone, ZONE_CONGESTED);
-                       }
-
                }
 
                /*
@@ -2818,17 +2804,9 @@ loop_again:
                                pfmemalloc_watermark_ok(pgdat))
                        wake_up(&pgdat->pfmemalloc_wait);
 
-               if (pgdat_balanced(pgdat, order, *classzone_idx))
+               if (pgdat_balanced(pgdat, order, *classzone_idx)) {
+                       pgdat_is_balanced = true;
                        break;          /* kswapd: all done */
-               /*
-                * OK, kswapd is getting into trouble.  Take a nap, then take
-                * another pass across the zones.
-                */
-               if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
-                       if (has_under_min_watermark_zone)
-                               count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
-                       else if (unbalanced_zone)
-                               wait_iff_congested(unbalanced_zone, BLK_RW_ASYNC, HZ/10);
                }
 
                /*
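Note: this was the last consumer of unbalanced_zone and has_under_min_watermark_zone (both dropped in the hunk at -2647 above); kswapd no longer naps in wait_iff_congested() between passes here, and the KSWAPD_SKIP_CONGESTION_WAIT event is no longer counted at this point.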
@@ -2840,9 +2818,9 @@ loop_again:
                if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
                        break;
        } while (--sc.priority >= 0);
-out:
 
-       if (!pgdat_balanced(pgdat, order, *classzone_idx)) {
+out:
+       if (!pgdat_is_balanced) {
                cond_resched();
 
                try_to_freeze();
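Note: the exit path now tests the pgdat_is_balanced flag maintained inside the loop instead of calling pgdat_balanced() a second time, sparing a redundant walk over the node's zones on every exit.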
@@ -3105,7 +3083,7 @@ unsigned long global_reclaimable_pages(void)
        nr = global_page_state(NR_ACTIVE_FILE) +
             global_page_state(NR_INACTIVE_FILE);
 
-       if (nr_swap_pages > 0)
+       if (get_nr_swap_pages() > 0)
                nr += global_page_state(NR_ACTIVE_ANON) +
                      global_page_state(NR_INACTIVE_ANON);
 
@@ -3119,7 +3097,7 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
        nr = zone_page_state(zone, NR_ACTIVE_FILE) +
             zone_page_state(zone, NR_INACTIVE_FILE);
 
-       if (nr_swap_pages > 0)
+       if (get_nr_swap_pages() > 0)
                nr += zone_page_state(zone, NR_ACTIVE_ANON) +
                      zone_page_state(zone, NR_INACTIVE_ANON);
 
@@ -3333,7 +3311,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
                .may_swap = 1,
                .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
-               .gfp_mask = gfp_mask,
+               .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
                .order = order,
                .priority = ZONE_RECLAIM_PRIORITY,
        };