void putback_lru_page(struct page *page)
{
int lru;
- int active = !!TestClearPageActive(page);
int was_unevictable = PageUnevictable(page);
VM_BUG_ON(PageLRU(page));
if (page_evictable(page)) {
/*
* For evictable pages, we can use the cache. In the event of a
* race, the worst case is we end up with an unevictable page on
* the [in]active list.
* We know how to handle that.
*/
- lru = active + page_lru_base_type(page);
- lru_cache_add_lru(page, lru);
+ lru = page_lru_base_type(page);
+ lru_cache_add(page);
} else {
/*
* Put unevictable pages directly on zone's unevictable
* list.
*/
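The dropped lru argument is recoverable from the page itself: by the time the pagevec drains, PG_active and PG_unevictable say which list the page belongs on. A minimal sketch of that derivation, mirroring the logic of the kernel's page_lru() helper (the function name here is ours, for illustration):

/*
 * Illustrative only: recompute the LRU list from page flags alone,
 * so lru_cache_add() callers no longer pass an enum lru_list.
 */
static enum lru_list sketch_page_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page))
		return LRU_UNEVICTABLE;

	lru = page_lru_base_type(page);	/* LRU_INACTIVE_ANON or _FILE */
	if (PageActive(page))
		lru += LRU_ACTIVE;	/* shift to the active variant */
	return lru;
}

This is also why the TestClearPageActive() above can go: leaving PG_active set is now how a page asks to be drained onto the active list.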
static void page_check_dirty_writeback(struct page *page,
bool *dirty, bool *writeback)
{
+ struct address_space *mapping;
+
/*
* Anonymous pages are not handled by flushers and must be written
* from reclaim context. Do not stall reclaim based on them.
*/
if (!page_is_file_cache(page)) {
*dirty = false;
*writeback = false;
return;
}
/* By default assume that the page flags are accurate */
*dirty = PageDirty(page);
*writeback = PageWriteback(page);
+
+ /* Verify dirty/writeback state if the filesystem supports it */
+ if (!page_has_private(page))
+ return;
+
+ mapping = page_mapping(page);
+ if (mapping && mapping->a_ops->is_dirty_writeback)
+ mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
}
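A filesystem opts in by providing ->is_dirty_writeback() in its address_space_operations. The sketch below shows the shape such an implementation can take for a buffer_head-backed filesystem, loosely following the buffer_check_dirty_writeback() helper this series adds to fs/buffer.c; treat it as illustrative rather than a verbatim quote:

/*
 * Sketch: a page backed by buffer_heads counts as dirty or under
 * writeback if any one of its buffers is, regardless of the
 * page-level PG_dirty/PG_writeback bits checked above.
 */
static void sketch_is_dirty_writeback(struct page *page,
				      bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;

	*dirty = false;
	*writeback = PageWriteback(page);

	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	do {
		if (buffer_locked(bh))
			*writeback = true;	/* buffer I/O in flight */
		if (buffer_dirty(bh))
			*dirty = true;
		bh = bh->b_this_page;
	} while (bh != head);
}

Checking the buffers matters because a page can have clean page-level flags while individual buffers are still dirty or locked for I/O, which would otherwise mislead reclaim's stall heuristics.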
/*
* If reclaim is isolating dirty pages under writeback, it implies
* that the long-lived page allocation rate is exceeding the page
* laundering rate. Either the global limits are not being effective
* at throttling processes due to the page distribution throughout
* zones or there is heavy usage of a slow backing device. The
* only option is to throttle from reclaim context, which is not ideal
* as there is no guarantee the dirtying process is throttled in the
* same way balance_dirty_pages() manages.
*
- * This scales the number of dirty pages that must be under writeback
- * before a zone gets flagged ZONE_WRITEBACK. It is a simple backoff
- * function that has the most effect in the range DEF_PRIORITY to
- * DEF_PRIORITY-2, which is the priority at which reclaim is
- * considered to be in trouble.
- *
- * DEF_PRIORITY 100% isolated pages must be PageWriteback to throttle
- * DEF_PRIORITY-1 50% must be PageWriteback
- * DEF_PRIORITY-2 25% must be PageWriteback, kswapd in trouble
- * ...
- * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
- * isolated page is PageWriteback
- *
+ * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
+ * of pages under writeback that are flagged for immediate reclaim and
+ * stall if any are encountered in the nr_immediate check below.
*/
- if (nr_writeback && nr_writeback >=
- (nr_taken >> (DEF_PRIORITY - sc->priority)))
+ if (nr_writeback && nr_writeback == nr_taken)
zone_set_flag(zone, ZONE_WRITEBACK);
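For context, a hedged sketch of the consumer side that the new comment describes. The zone_is_reclaim_writeback() accessor name is assumed here by analogy with the other zone-flag helpers; it is not quoted from this hunk:

/*
 * Sketch: kswapd treats a page as "immediate" when it is still under
 * writeback, was tagged PG_reclaim on a previous pass, and the zone
 * has been flagged ZONE_WRITEBACK above. Enough such pages mean
 * writeback is the bottleneck, so stalling beats rescanning.
 */
static bool sketch_page_stalls_kswapd(struct zone *zone, struct page *page)
{
	return current_is_kswapd() &&
	       PageWriteback(page) && PageReclaim(page) &&
	       zone_is_reclaim_writeback(zone);	/* assumed helper name */
}

If every isolated page is in this state, sleeping briefly lets the flushers make progress rather than burning CPU rescanning the tail of the LRU.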
aborted_reclaim = shrink_zones(zonelist, sc);
/*
- * Don't shrink slabs when reclaiming memory from
- * over limit cgroups
+ * Don't shrink slabs when reclaiming memory from over limit
+ * cgroups, but do shrink slab at least once when aborting
+ * reclaim for compaction so that file/anon LRU pages are not
+ * scanned disproportionately to slab pages.
*/
if (global_reclaim(sc)) {
unsigned long lru_pages = 0;
wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
WB_REASON_TRY_TO_FREE_PAGES);
sc->may_writepage = 1;
}
- } while (--sc->priority >= 0);
+ } while (--sc->priority >= 0 && !aborted_reclaim);
out:
delayacct_freepages_end();
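The aborted_reclaim signal originates in shrink_zones(), which stops reclaim once compaction could already succeed. A rough sketch of that predicate under the compaction API of this era (the wrapper name is ours; compaction_suitable() is the real entry point):

/*
 * Sketch: for a costly high-order allocation, stop reclaiming once a
 * zone has enough free base pages for compaction to proceed; further
 * LRU scanning would only evict pages compaction does not need.
 */
static bool sketch_compaction_ready(struct zone *zone, struct scan_control *sc)
{
	if (!IS_ENABLED(CONFIG_COMPACTION) || !sc->order)
		return false;

	return compaction_suitable(zone, sc->order) == COMPACT_CONTINUE;
}

Exiting the priority loop early via the !aborted_reclaim condition above then hands control back to the allocator, which can invoke compaction directly instead of reclaiming further.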