diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2aaafe82f513dcd9820c5adb08bfb6a18d42615b..d6e7ba7373be7caea9f12eb73bf994df5ddc7467 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -53,6 +53,7 @@
 #include <linux/compaction.h>
 #include <trace/events/kmem.h>
 #include <linux/ftrace_event.h>
+#include <linux/memcontrol.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -565,7 +566,8 @@ static inline int free_pages_check(struct page *page)
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (atomic_read(&page->_count) != 0) |
-               (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
+               (page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
+               (mem_cgroup_bad_page_check(page)))) {
                bad_page(page);
                return 1;
        }
@@ -614,6 +616,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        list = &pcp->lists[migratetype];
                } while (list_empty(list));
 
+               /* This is the only non-empty list. Free them all. */
+               if (batch_free == MIGRATE_PCPTYPES)
+                       batch_free = to_free;
+
                do {
                        page = list_entry(list->prev, struct page, lru);
                        /* must delete as __free_one_page list manipulates */
@@ -750,7 +756,8 @@ static inline int check_new_page(struct page *page)
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (atomic_read(&page->_count) != 0)  |
-               (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
+               (page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
+               (mem_cgroup_bad_page_check(page)))) {
                bad_page(page);
                return 1;
        }
@@ -863,9 +870,8 @@ static int move_freepages(struct zone *zone,
                }
 
                order = page_order(page);
-               list_del(&page->lru);
-               list_add(&page->lru,
-                       &zone->free_area[order].free_list[migratetype]);
+               list_move(&page->lru,
+                         &zone->free_area[order].free_list[migratetype]);
                page += 1 << order;
                pages_moved += 1 << order;
        }
@@ -1333,7 +1339,7 @@ again:
        }
 
        __count_zone_vm_events(PGALLOC, zone, 1 << order);
-       zone_statistics(preferred_zone, zone);
+       zone_statistics(preferred_zone, zone, gfp_flags);
        local_irq_restore(flags);
 
        VM_BUG_ON(bad_range(zone, page));
@@ -2099,7 +2105,7 @@ rebalance:
                                        sync_migration);
        if (page)
                goto got_pg;
-       sync_migration = true;
+       sync_migration = !(gfp_mask & __GFP_NO_KSWAPD);
 
        /* Try direct reclaim and then allocating */
        page = __alloc_pages_direct_reclaim(gfp_mask, order,
@@ -2171,12 +2177,25 @@ rebalance:
 
 nopage:
        if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
-               printk(KERN_WARNING "%s: page allocation failure."
-                       " order:%d, mode:0x%x\n",
+               unsigned int filter = SHOW_MEM_FILTER_NODES;
+
+               /*
+                * This documents exceptions given to allocations in certain
+                * contexts that are allowed to allocate outside current's set
+                * of allowed nodes.
+                */
+               if (!(gfp_mask & __GFP_NOMEMALLOC))
+                       if (test_thread_flag(TIF_MEMDIE) ||
+                           (current->flags & (PF_MEMALLOC | PF_EXITING)))
+                               filter &= ~SHOW_MEM_FILTER_NODES;
+               if (in_interrupt() || !wait)
+                       filter &= ~SHOW_MEM_FILTER_NODES;
+
+               pr_warning("%s: page allocation failure. order:%d, mode:0x%x\n",
                        current->comm, order, gfp_mask);
                dump_stack();
                if (!should_suppress_show_mem())
-                       show_mem();
+                       show_mem(filter);
        }
        return page;
 got_pg:
@@ -5668,4 +5687,5 @@ void dump_page(struct page *page)
                page, atomic_read(&page->_count), page_mapcount(page),
                page->mapping, page->index);
        dump_page_flags(page->flags);
+       mem_cgroup_print_bad_page(page);
 }