mm: move definition for LRU isolation modes to a header
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a6326c71b663802fab4972b566a3e510e91bff78..cefe6fe8d991b32b1b0e66c86953d92e00c39aea 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -475,6 +475,8 @@ static inline void __free_one_page(struct page *page,
                int migratetype)
 {
        unsigned long page_idx;
+       unsigned long combined_idx;
+       struct page *buddy;
 
        if (unlikely(PageCompound(page)))
                if (unlikely(destroy_compound_page(page, order)))
@@ -488,9 +490,6 @@ static inline void __free_one_page(struct page *page,
        VM_BUG_ON(bad_range(zone, page));
 
        while (order < MAX_ORDER-1) {
-               unsigned long combined_idx;
-               struct page *buddy;
-
                buddy = __page_find_buddy(page, page_idx, order);
                if (!page_is_buddy(page, buddy, order))
                        break;
@@ -505,8 +504,29 @@ static inline void __free_one_page(struct page *page,
                order++;
        }
        set_page_order(page, order);
-       list_add(&page->lru,
-               &zone->free_area[order].free_list[migratetype]);
+
+       /*
+        * If this is not the largest possible page, check if the buddy
+        * of the next-highest order is free. If it is, it's possible
+        * that pages are being freed that will coalesce soon. In case
+        * that is happening, add the free page to the tail of the list
+        * so it's less likely to be used soon and more likely to be merged
+        * as a higher-order page.
+        */
+       if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) {
+               struct page *higher_page, *higher_buddy;
+               combined_idx = __find_combined_index(page_idx, order);
+               higher_page = page + combined_idx - page_idx;
+               higher_buddy = __page_find_buddy(higher_page, combined_idx, order + 1);
+               if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
+                       list_add_tail(&page->lru,
+                               &zone->free_area[order].free_list[migratetype]);
+                       goto out;
+               }
+       }
+
+       list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
+out:
        zone->free_area[order].nr_free++;
 }
 
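The tail-placement logic above relies on the buddy-index arithmetic of
__page_find_buddy() and __find_combined_index(). For reference, here is that
arithmetic in isolation: a minimal C sketch assuming the classic
power-of-two buddy scheme, using plain indices instead of the kernel's
struct page based helpers:

static unsigned long buddy_index(unsigned long page_idx, unsigned int order)
{
	/* The buddy block differs only in bit 'order' of the page index. */
	return page_idx ^ (1UL << order);
}

static unsigned long combined_index(unsigned long page_idx, unsigned int order)
{
	/* The merged block starts at the index with bit 'order' cleared. */
	return page_idx & ~(1UL << order);
}

/*
 * Worked example for the higher-order probe in the hunk above:
 *   page_idx = 12 (0b1100), order = 2
 *   buddy_index(12, 2)    = 8  -> order-2 buddy
 *   combined_index(12, 2) = 8  -> start of the would-be order-3 block,
 *                                 whose own buddy is then tested at order+1
 */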
@@ -1970,10 +1990,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        if (unlikely(!zonelist->_zonerefs->zone))
                return NULL;
 
+       get_mems_allowed();
        /* The preferred zone is used for statistics later */
        first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
-       if (!preferred_zone)
+       if (!preferred_zone) {
+               put_mems_allowed();
                return NULL;
+       }
 
        /* First allocation attempt */
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
@@ -1983,6 +2006,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                page = __alloc_pages_slowpath(gfp_mask, order,
                                zonelist, high_zoneidx, nodemask,
                                preferred_zone, migratetype);
+       put_mems_allowed();
 
        trace_mm_page_alloc(page, order, gfp_mask, migratetype);
        return page;
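The get_mems_allowed()/put_mems_allowed() pair added above brackets the
entire allocation, including the new early-return branch, so the task's
cpuset nodemask stays stable while the zonelist is walked. The invariant is
simply that every exit path must drop what it took. A minimal usage sketch,
where do_alloc() is a hypothetical stand-in for the
get_page_from_freelist()/__alloc_pages_slowpath() calls:

struct page *alloc_sketch(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	get_mems_allowed();			/* pin current->mems_allowed */
	page = do_alloc(gfp_mask, order);	/* hypothetical helper */
	put_mems_allowed();			/* drop on every return path */
	return page;
}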
@@ -2582,7 +2606,7 @@ static int default_zonelist_order(void)
         * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
         * If they are really small and used heavily, the system can fall
         * into OOM very easily.
-        * This function detect ZONE_DMA/DMA32 size and confgigures zone order.
+        * This function detects ZONE_DMA/DMA32 size and configures zone order.
         */
        /* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
        low_kmem_size = 0;
@@ -2594,6 +2618,15 @@ static int default_zonelist_order(void)
                                if (zone_type < ZONE_NORMAL)
                                        low_kmem_size += z->present_pages;
                                total_size += z->present_pages;
+                       } else if (zone_type == ZONE_NORMAL) {
+                               /*
+                                * If any node has only lowmem, then node order
+                                * is preferred to allow kernel allocations
+                                * locally; otherwise, they can easily infringe
+                                * on other nodes when there is an abundance of
+                                * lowmem available to allocate from.
+                                */
+                               return ZONELIST_ORDER_NODE;
                        }
                }
        }
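The new else-if branch short-circuits the sizing heuristic: a node whose
ZONE_NORMAL is unpopulated holds only DMA/DMA32 lowmem, so node ordering
keeps that node's kernel allocations local rather than letting them spill
onto (and drain) other nodes' lowmem. Distilled into a standalone sketch,
simplified to a hypothetical per-node count of ZONE_NORMAL pages; the real
function keeps scanning and falls back to a size-ratio heuristic when no
lowmem-only node exists:

/* Returns 1 for node order, 0 for zone order. */
static int want_node_order(int nr_nodes, const unsigned long *normal_pages)
{
	int nid;

	for (nid = 0; nid < nr_nodes; nid++)
		if (normal_pages[nid] == 0)	/* lowmem-only node */
			return 1;
	return 0;	/* defer to the DMA/total size-ratio check */
}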