diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b09ce5fe0cd225ffbcf1e63817bef142b7d00654..0ee638f76ebe584cd40d7541bac886b96b03162e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -56,6 +56,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/memcontrol.h>
 #include <linux/prefetch.h>
+#include <linux/mm_inline.h>
 #include <linux/migrate.h>
 #include <linux/page-debug-flags.h>
 #include <linux/hugetlb.h>
@@ -488,8 +489,10 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
  * (c) a page and its buddy have the same order &&
  * (d) a page and its buddy are in the same zone.
  *
- * For recording whether a page is in the buddy system, we set ->_mapcount -2.
- * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
+ * For recording whether a page is in the buddy system, we set ->_mapcount
+ * to PAGE_BUDDY_MAPCOUNT_VALUE.
+ * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
+ * serialized by zone->lock.
  *
  * For recording page's order, we use page_private(page).
  */
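
For orientation, the helpers this comment refers to live in include/linux/mm.h, where PAGE_BUDDY_MAPCOUNT_VALUE is -128 in this series. A minimal sketch of the tagging scheme, not the verbatim kernel code:

	/* Callers must hold zone->lock, per the comment above. */
	static inline int PageBuddy(struct page *page)
	{
		return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
	}

	static inline void __SetPageBuddy(struct page *page)
	{
		atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
	}

	static inline void __ClearPageBuddy(struct page *page)
	{
		atomic_set(&page->_mapcount, -1);	/* back to "unmapped" */
	}
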
@@ -527,8 +530,9 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  * as necessary, plus some accounting needed to play nicely with other
  * parts of the VM system.
  * At each level, we keep a list of pages, which are heads of continuous
- * free pages of length of (1 << order) and marked with _mapcount -2. Page's
- * order is recorded in page_private(page) field.
+ * free pages of length (1 << order) and marked with _mapcount
+ * PAGE_BUDDY_MAPCOUNT_VALUE. The page's order is recorded in the
+ * page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
  * other.  That is, if we allocate a small block, and both were
  * free, the remainder of the region must be split into blocks.
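
The split/merge bookkeeping described above relies on the identity in __find_buddy_index(), the function named in the earlier hunk header: the buddy of a (1 << order)-aligned block is found by flipping bit 'order' of its page index. This is the actual one-liner from this file:

	static inline unsigned long
	__find_buddy_index(unsigned long page_idx, unsigned int order)
	{
		return page_idx ^ (1 << order);
	}

For example, at order 2 the blocks at indices 8 and 12 are buddies; merging them produces the order-3 block at index 8.
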
@@ -647,7 +651,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
        int to_free = count;
 
        spin_lock(&zone->lock);
-       zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
        while (to_free) {
@@ -696,7 +699,6 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
                                int migratetype)
 {
        spin_lock(&zone->lock);
-       zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
        __free_one_page(page, zone, order, migratetype);
@@ -751,19 +753,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 void __init __free_pages_bootmem(struct page *page, unsigned int order)
 {
        unsigned int nr_pages = 1 << order;
+       struct page *p = page;
        unsigned int loop;
 
-       prefetchw(page);
-       for (loop = 0; loop < nr_pages; loop++) {
-               struct page *p = &page[loop];
-
-               if (loop + 1 < nr_pages)
-                       prefetchw(p + 1);
+       prefetchw(p);
+       for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
+               prefetchw(p + 1);
                __ClearPageReserved(p);
                set_page_count(p, 0);
        }
+       __ClearPageReserved(p);
+       set_page_count(p, 0);
 
-       page_zone(page)->managed_pages += 1 << order;
+       page_zone(page)->managed_pages += nr_pages;
        set_page_refcounted(page);
        __free_pages(page, order);
 }
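
The rewritten loop peels off the final page so the body can issue prefetchw(p + 1) unconditionally, instead of re-testing loop + 1 < nr_pages on every iteration as the old code did. The same shape as a stand-alone sketch, with hypothetical item/process stand-ins and a dummy prefetchw (the real one is a kernel macro from <linux/prefetch.h>):

	struct item { int v; };
	static void process(struct item *p) { p->v = 0; }
	static void prefetchw(const void *p) { (void)p; /* stand-in */ }

	/* Assumes n >= 1, just as nr_pages = 1 << order always is. */
	static void walk(struct item *p, unsigned int n)
	{
		unsigned int i;

		prefetchw(p);
		for (i = 0; i + 1 < n; i++, p++) {
			prefetchw(p + 1);	/* always in range here */
			process(p);
		}
		process(p);	/* last element: nothing left to prefetch */
	}
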
@@ -1101,8 +1103,9 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                               is_migrate_cma(migratetype)
                             ? migratetype : start_migratetype);
 
-                       trace_mm_page_alloc_extfrag(page, order, current_order,
-                               start_migratetype, new_type);
+                       trace_mm_page_alloc_extfrag(page, order,
+                               current_order, start_migratetype, migratetype,
+                               new_type == start_migratetype);
 
                        return page;
                }
@@ -1305,7 +1308,7 @@ void mark_free_pages(struct zone *zone)
        int order, t;
        struct list_head *curr;
 
-       if (!zone->spanned_pages)
+       if (zone_is_empty(zone))
                return;
 
        spin_lock_irqsave(&zone->lock, flags);
@@ -1550,6 +1553,7 @@ again:
                                          get_pageblock_migratetype(page));
        }
 
+       __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
        __count_zone_vm_events(PGALLOC, zone, 1 << order);
        zone_statistics(preferred_zone, zone, gfp_flags);
        local_irq_restore(flags);
@@ -1816,6 +1820,11 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
        bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
 }
 
+static bool zone_local(struct zone *local_zone, struct zone *zone)
+{
+       return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE;
+}
+
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 {
        return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
@@ -1853,6 +1862,11 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
 {
 }
 
+static bool zone_local(struct zone *local_zone, struct zone *zone)
+{
+       return true;
+}
+
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 {
        return true;
@@ -1884,16 +1898,41 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 zonelist_scan:
        /*
         * Scan zonelist, looking for a zone with enough free.
-        * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+        * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
         */
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                                high_zoneidx, nodemask) {
+               unsigned long mark;
+
                if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
                        !zlc_zone_worth_trying(zonelist, z, allowednodes))
                                continue;
                if ((alloc_flags & ALLOC_CPUSET) &&
                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
                                continue;
+               BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
+               if (unlikely(alloc_flags & ALLOC_NO_WATERMARKS))
+                       goto try_this_zone;
+               /*
+                * Distribute pages in proportion to the individual
+                * zone size to ensure fair page aging.  The zone a
+                * page was allocated in should have no effect on the
+                * time the page has in memory before being reclaimed.
+                *
+                * When zone_reclaim_mode is enabled, try to stay in
+                * local zones in the fastpath.  If that fails, the
+                * slowpath is entered, which will do another pass
+                * starting with the local zones, but ultimately fall
+                * back to remote zones that do not partake in the
+                * fairness round-robin cycle of this zonelist.
+                */
+               if (alloc_flags & ALLOC_WMARK_LOW) {
+                       if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
+                               continue;
+                       if (zone_reclaim_mode &&
+                           !zone_local(preferred_zone, zone))
+                               continue;
+               }
                /*
                 * When allocating a page cache page for writing, we
                 * want to get it from a zone that is within its dirty
@@ -1924,16 +1963,11 @@ zonelist_scan:
                    (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
                        goto this_zone_full;
 
-               BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
-               if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
-                       unsigned long mark;
+               mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+               if (!zone_watermark_ok(zone, order, mark,
+                                      classzone_idx, alloc_flags)) {
                        int ret;
 
-                       mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
-                       if (zone_watermark_ok(zone, order, mark,
-                                   classzone_idx, alloc_flags))
-                               goto try_this_zone;
-
                        if (IS_ENABLED(CONFIG_NUMA) &&
                                        !did_zlc_setup && nr_online_nodes > 1) {
                                /*
@@ -2345,16 +2379,30 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
        return page;
 }
 
-static inline
-void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
-                                               enum zone_type high_zoneidx,
-                                               enum zone_type classzone_idx)
+static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
+                            struct zonelist *zonelist,
+                            enum zone_type high_zoneidx,
+                            struct zone *preferred_zone)
 {
        struct zoneref *z;
        struct zone *zone;
 
-       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
-               wakeup_kswapd(zone, order, classzone_idx);
+       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+               if (!(gfp_mask & __GFP_NO_KSWAPD))
+                       wakeup_kswapd(zone, order, zone_idx(preferred_zone));
+               /*
+                * Only reset the batches of zones that were actually
+                * considered in the fast path; we don't want to
+                * thrash fairness information for zones that are not
+                * actually part of this zonelist's round-robin cycle.
+                */
+               if (zone_reclaim_mode && !zone_local(preferred_zone, zone))
+                       continue;
+               mod_zone_page_state(zone, NR_ALLOC_BATCH,
+                                   high_wmark_pages(zone) -
+                                   low_wmark_pages(zone) -
+                                   zone_page_state(zone, NR_ALLOC_BATCH));
+       }
 }
 
 static inline int
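
One subtlety in the refill above: mod_zone_page_state() takes a delta, not an absolute value, so setting the counter to a target is expressed as adding (target - current). A hypothetical helper making the idiom explicit (set_alloc_batch is not a real kernel function):

	static void set_alloc_batch(struct zone *zone, long target)
	{
		/* current + (target - current) == target */
		mod_zone_page_state(zone, NR_ALLOC_BATCH,
				    target - zone_page_state(zone, NR_ALLOC_BATCH));
	}

The same pattern appears again in __setup_per_zone_wmarks() further down.
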
@@ -2450,9 +2498,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                goto nopage;
 
 restart:
-       if (!(gfp_mask & __GFP_NO_KSWAPD))
-               wake_all_kswapd(order, zonelist, high_zoneidx,
-                                               zone_idx(preferred_zone));
+       prepare_slowpath(gfp_mask, order, zonelist,
+                        high_zoneidx, preferred_zone);
 
        /*
         * OK, we're below the kswapd watermark and have kicked background
@@ -3119,7 +3166,7 @@ void show_free_areas(unsigned int filter)
                        K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
                        K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
                        zone->pages_scanned,
-                       (zone->all_unreclaimable ? "yes" : "no")
+                       (!zone_reclaimable(zone) ? "yes" : "no")
                        );
                printk("lowmem_reserve[]:");
                for (i = 0; i < MAX_NR_ZONES; i++)
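
The zone->all_unreclaimable flag that the old line printed is removed throughout this series; the new predicate derives the answer from scan progress instead. Roughly what zone_reclaimable() looks like in mm/vmscan.c at this point (a sketch of the companion patch, so treat the exact form as an assumption):

	/* Reclaimable until the zone has been scanned six times over
	 * without freeing anything. */
	bool zone_reclaimable(struct zone *zone)
	{
		return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
	}
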
@@ -4261,7 +4308,7 @@ int __meminit init_currently_empty_zone(struct zone *zone,
 int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
        unsigned long start_pfn, end_pfn;
-       int i, nid;
+       int nid;
        /*
         * NOTE: The following SMP-unsafe globals are only used early in boot
         * when the kernel is running single-threaded.
@@ -4272,15 +4319,14 @@ int __meminit __early_pfn_to_nid(unsigned long pfn)
        if (last_start_pfn <= pfn && pfn < last_end_pfn)
                return last_nid;
 
-       for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
-               if (start_pfn <= pfn && pfn < end_pfn) {
-                       last_start_pfn = start_pfn;
-                       last_end_pfn = end_pfn;
-                       last_nid = nid;
-                       return nid;
-               }
-       /* This is a memory hole */
-       return -1;
+       nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
+       if (nid != -1) {
+               last_start_pfn = start_pfn;
+               last_end_pfn = end_pfn;
+               last_nid = nid;
+       }
+
+       return nid;
 }
 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
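
The rewrite swaps the linear for_each_mem_pfn_range() walk for a binary search over the memblock ranges. The helper is added in mm/memblock.c in the same series; its contract, as assumed here:

	/* Returns the node id owning pfn and fills in the range it was
	 * found in, or -1 if pfn falls into a memory hole. */
	int memblock_search_pfn_nid(unsigned long pfn,
				    unsigned long *start_pfn,
				    unsigned long *end_pfn);

The last_start_pfn/last_end_pfn/last_nid cache retained above still short-circuits repeated lookups within one range.
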
 
@@ -4752,8 +4798,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                spin_lock_init(&zone->lru_lock);
                zone_seqlock_init(zone);
                zone->zone_pgdat = pgdat;
-
                zone_pcp_init(zone);
+
+               /* For bootup, initialized properly in watermark setup */
+               mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
+
                lruvec_init(&zone->lruvec);
                if (!size)
                        continue;
@@ -5387,7 +5436,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
                 * This is only okay since the processor is dead and cannot
                 * race with what we are doing.
                 */
-               refresh_cpu_vm_stats(cpu);
+               cpu_vm_stats_fold(cpu);
        }
        return NOTIFY_OK;
 }
@@ -5524,6 +5573,11 @@ static void __setup_per_zone_wmarks(void)
                zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
                zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 
+               __mod_zone_page_state(zone, NR_ALLOC_BATCH,
+                                     high_wmark_pages(zone) -
+                                     low_wmark_pages(zone) -
+                                     zone_page_state(zone, NR_ALLOC_BATCH));
+
                setup_zone_migrate_reserve(zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
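
Given the spacing established a few lines up (low = min + tmp/4, high = min + tmp/2, where tmp is the zone's proportional share of min_free_kbytes), the batch installed here works out to a quarter of that share. Illustrative arithmetic only:

	static unsigned long alloc_batch_target(unsigned long min, unsigned long tmp)
	{
		unsigned long low  = min + (tmp >> 2);
		unsigned long high = min + (tmp >> 1);

		return high - low;	/* == tmp >> 2, up to shift rounding */
	}
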
@@ -5955,6 +6009,17 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                        continue;
 
                page = pfn_to_page(check);
+
+               /*
+                * Hugepages are not in LRU lists, but they're movable.
+                * We need not scan over tail pages because we don't
+                * handle each tail page individually in migration.
+                */
+               if (PageHuge(page)) {
+                       iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
+                       continue;
+               }
+
                /*
                 * We can't use page_count without pin a page
                 * because another CPU can free compound page.
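
The round_up() in the new PageHuge branch advances iter past all tail pages of the compound page in one step, so the loop's own iter++ lands on the next aligned block. A stand-alone check of the arithmetic for an order-9 (2MB on x86) hugepage:

	#include <stdio.h>

	/* Demo of: iter = round_up(iter + 1, 1 << compound_order(page)) - 1; */
	int main(void)
	{
		unsigned long iter = 512;	/* offset of the hugepage head */
		unsigned long align = 1UL << 9;	/* 1 << compound_order(page) */

		iter = ((iter + 1 + align - 1) & ~(align - 1)) - 1;
		printf("iter = %lu, next offset checked = %lu\n", iter, iter + 1);
		/* prints: iter = 1023, next offset checked = 1024 */
		return 0;
	}
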