diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9dfbed2aacc9b6b5fa43e4a8ef9d007b0fde3418..1f0b460fe58c5308caae219343e415ec75f59f0d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -73,15 +73,6 @@ static int really_do_swap_account __initdata = 0;
 #define do_swap_account                (0)
 #endif
 
-/*
- * Per memcg event counter is incremented at every pagein/pageout. This counter
- * is used for trigger some periodic events. This is straightforward and better
- * than using jiffies etc. to handle periodic memcg event.
- *
- * These values will be used as !((event) & ((1 <<(thresh)) - 1))
- */
-#define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
-#define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
 
 /*
  * Statistics for memory cgroup.
@@ -93,19 +84,36 @@ enum mem_cgroup_stat_index {
        MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
        MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
-       MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
-       MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
        MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
        MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
-       /* incremented at every  pagein/pageout */
-       MEM_CGROUP_EVENTS = MEM_CGROUP_STAT_DATA,
        MEM_CGROUP_ON_MOVE,     /* someone is moving account between groups */
-
        MEM_CGROUP_STAT_NSTATS,
 };
 
+enum mem_cgroup_events_index {
+       MEM_CGROUP_EVENTS_PGPGIN,       /* # of pages paged in */
+       MEM_CGROUP_EVENTS_PGPGOUT,      /* # of pages paged out */
+       MEM_CGROUP_EVENTS_COUNT,        /* # of pages paged in/out */
+       MEM_CGROUP_EVENTS_NSTATS,
+};
+/*
+ * Per memcg event counter is incremented at every pagein/pageout. With THP,
+ * it will be incremented by the number of pages. This counter is used to
+ * trigger some periodic events. This is straightforward and better than
+ * using jiffies etc. to handle periodic memcg events.
+ */
+enum mem_cgroup_events_target {
+       MEM_CGROUP_TARGET_THRESH,
+       MEM_CGROUP_TARGET_SOFTLIMIT,
+       MEM_CGROUP_NTARGETS,
+};
+#define THRESHOLDS_EVENTS_TARGET (128)
+#define SOFTLIMIT_EVENTS_TARGET (1024)
+
 struct mem_cgroup_stat_cpu {
-       s64 count[MEM_CGROUP_STAT_NSTATS];
+       long count[MEM_CGROUP_STAT_NSTATS];
+       unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
+       unsigned long targets[MEM_CGROUP_NTARGETS];
 };
 
 /*
@@ -543,11 +551,11 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
  * common workload, threshold and synchronization as vmstat[] should be
  * implemented.
  */
-static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
-               enum mem_cgroup_stat_index idx)
+static long mem_cgroup_read_stat(struct mem_cgroup *mem,
+                                enum mem_cgroup_stat_index idx)
 {
+       long val = 0;
        int cpu;
-       s64 val = 0;
 
        get_online_cpus();
        for_each_online_cpu(cpu)
@@ -561,9 +569,9 @@ static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
        return val;
 }
 
-static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
+static long mem_cgroup_local_usage(struct mem_cgroup *mem)
 {
-       s64 ret;
+       long ret;
 
        ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
        ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
@@ -577,6 +585,22 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
        this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
 }
 
+static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
+                                           enum mem_cgroup_events_index idx)
+{
+       unsigned long val = 0;
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               val += per_cpu(mem->stat->events[idx], cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+       spin_lock(&mem->pcp_counter_lock);
+       val += mem->nocpu_base.events[idx];
+       spin_unlock(&mem->pcp_counter_lock);
+#endif
+       return val;
+}
+
 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
                                         bool file, int nr_pages)
 {
@@ -589,13 +613,13 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
 
        /* pagein of a big page is an event. So, ignore page size */
        if (nr_pages > 0)
-               __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
+               __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
        else {
-               __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
+               __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
                nr_pages = -nr_pages; /* for event */
        }
 
-       __this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages);
+       __this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
 
        preempt_enable();
 }
@@ -615,13 +639,34 @@ static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
        return total;
 }
 
-static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
+static bool __memcg_event_check(struct mem_cgroup *mem, int target)
 {
-       s64 val;
+       unsigned long val, next;
+
+       val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
+       next = this_cpu_read(mem->stat->targets[target]);
+       /* from time_after() in jiffies.h */
+       return ((long)next - (long)val < 0);
+}
+
+static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
+{
+       unsigned long val, next;
+
+       val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
 
-       val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);
+       switch (target) {
+       case MEM_CGROUP_TARGET_THRESH:
+               next = val + THRESHOLDS_EVENTS_TARGET;
+               break;
+       case MEM_CGROUP_TARGET_SOFTLIMIT:
+               next = val + SOFTLIMIT_EVENTS_TARGET;
+               break;
+       default:
+               return;
+       }
 
-       return !(val & ((1 << event_mask_shift) - 1));
+       this_cpu_write(mem->stat->targets[target], next);
 }
 
 /*
@@ -631,10 +676,15 @@ static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
 static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
 {
        /* threshold event is triggered in finer grain than soft limit */
-       if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
+       if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) {
                mem_cgroup_threshold(mem);
-               if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
+               __mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
+               if (unlikely(__memcg_event_check(mem,
+                       MEM_CGROUP_TARGET_SOFTLIMIT))) {
                        mem_cgroup_update_tree(mem, page);
+                       __mem_cgroup_target_update(mem,
+                               MEM_CGROUP_TARGET_SOFTLIMIT);
+               }
        }
 }
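
For reference (not part of the patch), a rough standalone sketch of the event/target scheme the hunks above implement. A single free-running counter (MEM_CGROUP_EVENTS_COUNT, advanced by the number of pages charged or uncharged, so a THP counts as many events) is compared against per-target "next" values; the signed subtraction borrowed from time_after() keeps the comparison correct even if the unsigned counter wraps. Plain variables stand in for the kernel's per-cpu stats:

#include <stdio.h>

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET  1024

static unsigned long events;          /* stands in for events[MEM_CGROUP_EVENTS_COUNT] */
static unsigned long thresh_next;     /* stands in for targets[MEM_CGROUP_TARGET_THRESH] */
static unsigned long softlimit_next;  /* stands in for targets[MEM_CGROUP_TARGET_SOFTLIMIT] */

/* same test as __memcg_event_check(): has the counter passed the target? */
static int event_check(unsigned long val, unsigned long next)
{
	return (long)next - (long)val < 0;  /* wraparound-safe, like time_after() */
}

/* same shape as memcg_check_events(): check, act, then push the target forward */
static void check_events(unsigned long nr_pages)
{
	events += nr_pages;
	if (event_check(events, thresh_next)) {
		printf("threshold work at %lu events\n", events);
		thresh_next = events + THRESHOLDS_EVENTS_TARGET;
		if (event_check(events, softlimit_next)) {
			printf("softlimit work at %lu events\n", events);
			softlimit_next = events + SOFTLIMIT_EVENTS_TARGET;
		}
	}
}

int main(void)
{
	for (int i = 0; i < 3000; i++)
		check_events(1);  /* one regular page charged/uncharged per iteration */
	return 0;
}
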
 
@@ -876,18 +926,28 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 }
 
 /*
- * At handling SwapCache, pc->mem_cgroup may be changed while it's linked to
- * lru because the page may.be reused after it's fully uncharged (because of
- * SwapCache behavior).To handle that, unlink page_cgroup from LRU when charge
- * it again. This function is only used to charge SwapCache. It's done under
- * lock_page and expected that zone->lru_lock is never held.
+ * When handling SwapCache and other FUSE cases, pc->mem_cgroup may be changed
+ * while it's linked to the lru because the page may be reused after it's fully
+ * uncharged. To handle that, unlink page_cgroup from LRU when charging it again.
+ * It's done under lock_page and expected that zone->lru_lock is never held.
  */
-static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
+static void mem_cgroup_lru_del_before_commit(struct page *page)
 {
        unsigned long flags;
        struct zone *zone = page_zone(page);
        struct page_cgroup *pc = lookup_page_cgroup(page);
 
+       /*
+        * Doing this check without taking ->lru_lock seems wrong but this
+        * is safe. Because if page_cgroup's USED bit is unset, the page
+        * will not be added to any memcg's LRU. If page_cgroup's USED bit is
+        * set, the commit after this will fail, anyway.
+        * All this charge/uncharge is done under some mutual exclusion.
+        * So, we don't need to take care of changes in the USED bit.
+        */
+       if (likely(!PageLRU(page)))
+               return;
+
        spin_lock_irqsave(&zone->lru_lock, flags);
        /*
         * Forget old LRU when this page_cgroup is *not* used. This Used bit
@@ -898,12 +958,15 @@ static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
        spin_unlock_irqrestore(&zone->lru_lock, flags);
 }
 
-static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
+static void mem_cgroup_lru_add_after_commit(struct page *page)
 {
        unsigned long flags;
        struct zone *zone = page_zone(page);
        struct page_cgroup *pc = lookup_page_cgroup(page);
 
+       /* take care of the page being added to LRU while we commit it */
+       if (likely(!PageLRU(page)))
+               return;
        spin_lock_irqsave(&zone->lru_lock, flags);
        /* link when the page is linked to LRU but page_cgroup isn't */
        if (PageLRU(page) && !PageCgroupAcctLRU(pc))
@@ -1109,16 +1172,16 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
  * @mem: the memory cgroup
  *
  * Returns the maximum amount of memory @mem can be charged with, in
- * bytes.
+ * pages.
  */
-static unsigned long long mem_cgroup_margin(struct mem_cgroup *mem)
+static unsigned long mem_cgroup_margin(struct mem_cgroup *mem)
 {
        unsigned long long margin;
 
        margin = res_counter_margin(&mem->res);
        if (do_swap_account)
                margin = min(margin, res_counter_margin(&mem->memsw));
-       return margin;
+       return margin >> PAGE_SHIFT;
 }
 
 static unsigned int get_swappiness(struct mem_cgroup *memcg)
@@ -1647,7 +1710,7 @@ EXPORT_SYMBOL(mem_cgroup_update_page_stat);
  * size of first charge trial. "32" comes from vmscan.c's magic value.
  * TODO: maybe necessary to use big numbers on big iron.
  */
-#define CHARGE_SIZE    (32 * PAGE_SIZE)
+#define CHARGE_BATCH   32U
 struct memcg_stock_pcp {
        struct mem_cgroup *cached; /* this never be root cgroup */
        unsigned int nr_pages;
@@ -1768,11 +1831,17 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
 
        spin_lock(&mem->pcp_counter_lock);
        for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
-               s64 x = per_cpu(mem->stat->count[i], cpu);
+               long x = per_cpu(mem->stat->count[i], cpu);
 
                per_cpu(mem->stat->count[i], cpu) = 0;
                mem->nocpu_base.count[i] += x;
        }
+       for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
+               unsigned long x = per_cpu(mem->stat->events[i], cpu);
+
+               per_cpu(mem->stat->events[i], cpu) = 0;
+               mem->nocpu_base.events[i] += x;
+       }
        /* need to clear ON_MOVE value, works as a kind of lock. */
        per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
        spin_unlock(&mem->pcp_counter_lock);
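
A toy illustration (not kernel code) of the drain-and-read pattern used for the new events[] counters: when a CPU goes offline, mem_cgroup_drain_pcp_counter() folds its per-cpu counts into mem->nocpu_base under pcp_counter_lock, and mem_cgroup_read_events() sums the still-online CPUs plus that folded remainder. With plain arrays standing in for the per-cpu data:

#include <stdio.h>

#define NR_CPUS   4
#define NR_EVENTS 3   /* PGPGIN, PGPGOUT, COUNT in the patch */

/* toy stand-ins for mem->stat (per-cpu) and mem->nocpu_base */
static unsigned long percpu_events[NR_CPUS][NR_EVENTS];
static unsigned long nocpu_base_events[NR_EVENTS];
static int cpu_online[NR_CPUS] = { 1, 1, 1, 1 };

/* like mem_cgroup_drain_pcp_counter(): fold a dead CPU's counts away */
static void drain_cpu(int cpu)
{
	for (int i = 0; i < NR_EVENTS; i++) {
		nocpu_base_events[i] += percpu_events[cpu][i];
		percpu_events[cpu][i] = 0;
	}
	cpu_online[cpu] = 0;
}

/* like mem_cgroup_read_events(): online CPUs plus the folded remainder */
static unsigned long read_event(int idx)
{
	unsigned long val = nocpu_base_events[idx];

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online[cpu])
			val += percpu_events[cpu][idx];
	return val;
}

int main(void)
{
	percpu_events[0][0] = 10;
	percpu_events[3][0] = 5;
	drain_cpu(3);                              /* CPU 3 goes offline */
	printf("PGPGIN = %lu\n", read_event(0));   /* still 15 */
	return 0;
}
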
@@ -1822,9 +1891,10 @@ enum {
        CHARGE_OOM_DIE,         /* the current is killed because of OOM */
 };
 
-static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
-                               int csize, bool oom_check)
+static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
+                               unsigned int nr_pages, bool oom_check)
 {
+       unsigned long csize = nr_pages * PAGE_SIZE;
        struct mem_cgroup *mem_over_limit;
        struct res_counter *fail_res;
        unsigned long flags = 0;
@@ -1845,14 +1915,13 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
        } else
                mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
        /*
-        * csize can be either a huge page (HPAGE_SIZE), a batch of
-        * regular pages (CHARGE_SIZE), or a single regular page
-        * (PAGE_SIZE).
+        * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
+        * of regular pages (CHARGE_BATCH), or a single regular page (1).
         *
         * Never reclaim on behalf of optional batching, retry with a
         * single page instead.
         */
-       if (csize == CHARGE_SIZE)
+       if (nr_pages == CHARGE_BATCH)
                return CHARGE_RETRY;
 
        if (!(gfp_mask & __GFP_WAIT))
@@ -1860,7 +1929,7 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
 
        ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
                                              gfp_mask, flags);
-       if (mem_cgroup_margin(mem_over_limit) >= csize)
+       if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
                return CHARGE_RETRY;
        /*
         * Even though the limit is exceeded at this point, reclaim
@@ -1871,7 +1940,7 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
         * unlikely to succeed so close to the limit, and we fall back
         * to regular pages anyway in case of failure.
         */
-       if (csize == PAGE_SIZE && ret)
+       if (nr_pages == 1 && ret)
                return CHARGE_RETRY;
 
        /*
@@ -1897,13 +1966,14 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
  */
 static int __mem_cgroup_try_charge(struct mm_struct *mm,
                                   gfp_t gfp_mask,
-                                  struct mem_cgroup **memcg, bool oom,
-                                  int page_size)
+                                  unsigned int nr_pages,
+                                  struct mem_cgroup **memcg,
+                                  bool oom)
 {
+       unsigned int batch = max(CHARGE_BATCH, nr_pages);
        int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
        struct mem_cgroup *mem = NULL;
        int ret;
-       int csize = max(CHARGE_SIZE, (unsigned long) page_size);
 
        /*
         * Unlike global VM's OOM-kill, we're not in memory shortage
@@ -1928,7 +1998,7 @@ again:
                VM_BUG_ON(css_is_removed(&mem->css));
                if (mem_cgroup_is_root(mem))
                        goto done;
-               if (page_size == PAGE_SIZE && consume_stock(mem))
+               if (nr_pages == 1 && consume_stock(mem))
                        goto done;
                css_get(&mem->css);
        } else {
@@ -1951,7 +2021,7 @@ again:
                        rcu_read_unlock();
                        goto done;
                }
-               if (page_size == PAGE_SIZE && consume_stock(mem)) {
+               if (nr_pages == 1 && consume_stock(mem)) {
                        /*
                         * It seems dangerous to access memcg without css_get().
                         * But considering how consume_stock works, it's not
@@ -1986,13 +2056,12 @@ again:
                        nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
                }
 
-               ret = __mem_cgroup_do_charge(mem, gfp_mask, csize, oom_check);
-
+               ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check);
                switch (ret) {
                case CHARGE_OK:
                        break;
                case CHARGE_RETRY: /* not in OOM situation but retry */
-                       csize = page_size;
+                       batch = nr_pages;
                        css_put(&mem->css);
                        mem = NULL;
                        goto again;
@@ -2013,8 +2082,8 @@ again:
                }
        } while (ret != CHARGE_OK);
 
-       if (csize > page_size)
-               refill_stock(mem, (csize - page_size) >> PAGE_SHIFT);
+       if (batch > nr_pages)
+               refill_stock(mem, batch - nr_pages);
        css_put(&mem->css);
 done:
        *memcg = mem;
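
Roughly, the batching performed by the renamed mem_cgroup_do_charge()/__mem_cgroup_try_charge() looks like the sketch below (a simplification, not the kernel API: a plain counter stands in for the res_counter and the per-cpu stock is collapsed into one variable). A single-page charge is served from the local stock when possible; otherwise a whole CHARGE_BATCH (or exactly nr_pages for a THP) is charged at once and the surplus is parked in the stock as refill_stock() does; under pressure the batch falls back to exactly nr_pages, mirroring the CHARGE_RETRY path.

#include <stdio.h>

#define CHARGE_BATCH 32U

static unsigned long usage, limit = 1000;   /* stand-in for the res_counter, in pages */
static unsigned int stock;                  /* stand-in for memcg_stock_pcp */

static int res_counter_charge(unsigned int nr_pages)
{
	if (usage + nr_pages > limit)
		return -1;              /* over limit: the caller would reclaim/retry */
	usage += nr_pages;
	return 0;
}

/* simplified __mem_cgroup_try_charge(): charge nr_pages, batching single pages */
static int try_charge(unsigned int nr_pages)
{
	unsigned int batch = nr_pages > CHARGE_BATCH ? nr_pages : CHARGE_BATCH;

	if (nr_pages == 1 && stock >= 1) {      /* consume_stock() fast path */
		stock--;
		return 0;
	}
	if (res_counter_charge(batch)) {
		/* on pressure, retry with only what was actually asked for */
		batch = nr_pages;
		if (res_counter_charge(batch))
			return -1;
	}
	if (batch > nr_pages)                   /* refill_stock() with the surplus */
		stock += batch - nr_pages;
	return 0;
}

int main(void)
{
	try_charge(1);          /* charges 32 pages, keeps 31 in stock */
	try_charge(1);          /* served from the stock, no res_counter access */
	try_charge(512);        /* THP: charged exactly, nothing stocked */
	printf("usage=%lu stock=%u\n", usage, stock);   /* usage=544 stock=30 */
	return 0;
}
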
@@ -2093,12 +2162,10 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 
 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
                                       struct page *page,
+                                      unsigned int nr_pages,
                                       struct page_cgroup *pc,
-                                      enum charge_type ctype,
-                                      int page_size)
+                                      enum charge_type ctype)
 {
-       int nr_pages = page_size >> PAGE_SHIFT;
-
        lock_page_cgroup(pc);
        if (unlikely(PageCgroupUsed(pc))) {
                unlock_page_cgroup(pc);
@@ -2187,26 +2254,28 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
 /**
  * mem_cgroup_move_account - move account of the page
  * @page: the page
+ * @nr_pages: number of regular pages (>1 for huge pages)
  * @pc:        page_cgroup of the page.
  * @from: mem_cgroup which the page is moved from.
  * @to:        mem_cgroup which the page is moved to. @from != @to.
  * @uncharge: whether we should call uncharge and css_put against @from.
- * @charge_size: number of bytes to charge (regular or huge page)
  *
  * The caller must confirm following.
  * - page is not on LRU (isolate_page() is useful.)
- * - compound_lock is held when charge_size > PAGE_SIZE
+ * - compound_lock is held when nr_pages > 1
  *
  * This function doesn't do "charge" nor css_get to new cgroup. It should be
  * done by a caller (__mem_cgroup_try_charge would be useful). If @uncharge is
  * true, this function does "uncharge" from old cgroup, but it doesn't if
  * @uncharge is false, so a caller should do "uncharge".
  */
-static int mem_cgroup_move_account(struct page *page, struct page_cgroup *pc,
-                                  struct mem_cgroup *from, struct mem_cgroup *to,
-                                  bool uncharge, int charge_size)
+static int mem_cgroup_move_account(struct page *page,
+                                  unsigned int nr_pages,
+                                  struct page_cgroup *pc,
+                                  struct mem_cgroup *from,
+                                  struct mem_cgroup *to,
+                                  bool uncharge)
 {
-       int nr_pages = charge_size >> PAGE_SHIFT;
        unsigned long flags;
        int ret;
 
@@ -2219,7 +2288,7 @@ static int mem_cgroup_move_account(struct page *page, struct page_cgroup *pc,
         * hold it.
         */
        ret = -EBUSY;
-       if (charge_size > PAGE_SIZE && !PageTransHuge(page))
+       if (nr_pages > 1 && !PageTransHuge(page))
                goto out;
 
        lock_page_cgroup(pc);
@@ -2277,8 +2346,8 @@ static int mem_cgroup_move_parent(struct page *page,
        struct cgroup *cg = child->css.cgroup;
        struct cgroup *pcg = cg->parent;
        struct mem_cgroup *parent;
-       int page_size = PAGE_SIZE;
-       unsigned long flags;
+       unsigned int nr_pages;
+       unsigned long uninitialized_var(flags);
        int ret;
 
        /* Is ROOT ? */
@@ -2291,23 +2360,21 @@ static int mem_cgroup_move_parent(struct page *page,
        if (isolate_lru_page(page))
                goto put;
 
-       if (PageTransHuge(page))
-               page_size = HPAGE_SIZE;
+       nr_pages = hpage_nr_pages(page);
 
        parent = mem_cgroup_from_cont(pcg);
-       ret = __mem_cgroup_try_charge(NULL, gfp_mask,
-                               &parent, false, page_size);
+       ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
        if (ret || !parent)
                goto put_back;
 
-       if (page_size > PAGE_SIZE)
+       if (nr_pages > 1)
                flags = compound_lock_irqsave(page);
 
-       ret = mem_cgroup_move_account(page, pc, child, parent, true, page_size);
+       ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
        if (ret)
-               __mem_cgroup_cancel_charge(parent, page_size >> PAGE_SHIFT);
+               __mem_cgroup_cancel_charge(parent, nr_pages);
 
-       if (page_size > PAGE_SIZE)
+       if (nr_pages > 1)
                compound_unlock_irqrestore(page, flags);
 put_back:
        putback_lru_page(page);
@@ -2327,13 +2394,13 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask, enum charge_type ctype)
 {
        struct mem_cgroup *mem = NULL;
-       int page_size = PAGE_SIZE;
+       unsigned int nr_pages = 1;
        struct page_cgroup *pc;
        bool oom = true;
        int ret;
 
        if (PageTransHuge(page)) {
-               page_size <<= compound_order(page);
+               nr_pages <<= compound_order(page);
                VM_BUG_ON(!PageTransHuge(page));
                /*
                 * Never OOM-kill a process for a huge page.  The
@@ -2345,11 +2412,11 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
        pc = lookup_page_cgroup(page);
        BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
 
-       ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, oom, page_size);
+       ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom);
        if (ret || !mem)
                return ret;
 
-       __mem_cgroup_commit_charge(mem, page, pc, ctype, page_size);
+       __mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype);
        return 0;
 }
 
@@ -2377,9 +2444,26 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                                        enum charge_type ctype);
 
+static void
+__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem,
+                                       enum charge_type ctype)
+{
+       struct page_cgroup *pc = lookup_page_cgroup(page);
+       /*
+        * In some cases, e.g. SwapCache and FUSE(splice_buf->radixtree), the
+        * page is already on the LRU. It means the page may be on some other
+        * page_cgroup's LRU. Take care of it.
+        */
+       mem_cgroup_lru_del_before_commit(page);
+       __mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
+       mem_cgroup_lru_add_after_commit(page);
+       return;
+}
+
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
+       struct mem_cgroup *mem = NULL;
        int ret;
 
        if (mem_cgroup_disabled())
@@ -2414,14 +2498,22 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
        if (unlikely(!mm))
                mm = &init_mm;
 
-       if (page_is_file_cache(page))
-               return mem_cgroup_charge_common(page, mm, gfp_mask,
-                               MEM_CGROUP_CHARGE_TYPE_CACHE);
+       if (page_is_file_cache(page)) {
+               ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true);
+               if (ret || !mem)
+                       return ret;
 
+               /*
+                * FUSE reuses pages without going through the final
+                * put that would remove them from the LRU list, so make
+                * sure that they get relinked properly.
+                */
+               __mem_cgroup_commit_charge_lrucare(page, mem,
+                                       MEM_CGROUP_CHARGE_TYPE_CACHE);
+               return ret;
+       }
        /* shmem */
        if (PageSwapCache(page)) {
-               struct mem_cgroup *mem;
-
                ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
                if (!ret)
                        __mem_cgroup_commit_charge_swapin(page, mem,
@@ -2465,30 +2557,26 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
        if (!mem)
                goto charge_cur_mm;
        *ptr = mem;
-       ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, PAGE_SIZE);
+       ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
        css_put(&mem->css);
        return ret;
 charge_cur_mm:
        if (unlikely(!mm))
                mm = &init_mm;
-       return __mem_cgroup_try_charge(mm, mask, ptr, true, PAGE_SIZE);
+       return __mem_cgroup_try_charge(mm, mask, 1, ptr, true);
 }
 
 static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                                        enum charge_type ctype)
 {
-       struct page_cgroup *pc;
-
        if (mem_cgroup_disabled())
                return;
        if (!ptr)
                return;
        cgroup_exclude_rmdir(&ptr->css);
-       pc = lookup_page_cgroup(page);
-       mem_cgroup_lru_del_before_commit_swapcache(page);
-       __mem_cgroup_commit_charge(ptr, page, pc, ctype, PAGE_SIZE);
-       mem_cgroup_lru_add_after_commit_swapcache(page);
+
+       __mem_cgroup_commit_charge_lrucare(page, ptr, ctype);
        /*
         * Now swap is on-memory. This means this page may be
         * counted both as mem and swap....double count.
@@ -2539,12 +2627,13 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
        __mem_cgroup_cancel_charge(mem, 1);
 }
 
-static void
-__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
-             int page_size)
+static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
+                                  unsigned int nr_pages,
+                                  const enum charge_type ctype)
 {
        struct memcg_batch_info *batch = NULL;
        bool uncharge_memsw = true;
+
        /* If swapout, usage of swap doesn't decrease */
        if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
                uncharge_memsw = false;
@@ -2568,7 +2657,7 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
        if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
                goto direct_uncharge;
 
-       if (page_size != PAGE_SIZE)
+       if (nr_pages > 1)
                goto direct_uncharge;
 
        /*
@@ -2584,9 +2673,9 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
                batch->memsw_nr_pages++;
        return;
 direct_uncharge:
-       res_counter_uncharge(&mem->res, page_size);
+       res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE);
        if (uncharge_memsw)
-               res_counter_uncharge(&mem->memsw, page_size);
+               res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE);
        if (unlikely(batch->memcg != mem))
                memcg_oom_recover(mem);
        return;
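
The uncharge side mirrors this batching: while a task has batching enabled (set up elsewhere in this file around munmap/truncate via the per-task memcg_batch info), single-page uncharges are only accumulated and the res_counter is touched once at the end; huge pages (nr_pages > 1) and the other cases above take the direct_uncharge path. A loose single-task sketch, counting in pages rather than bytes:

#include <stdio.h>

static unsigned long usage = 544;       /* stand-in res_counter, in pages */
static unsigned long batch_nr_pages;    /* stand-in for the per-task batch counter */
static int batching;

/* simplified mem_cgroup_do_uncharge() */
static void do_uncharge(unsigned int nr_pages)
{
	if (batching && nr_pages == 1) {    /* cheap: just remember it */
		batch_nr_pages++;
		return;
	}
	usage -= nr_pages;                  /* direct_uncharge path */
}

static void uncharge_start(void) { batching = 1; }

static void uncharge_end(void)              /* flush the whole batch once */
{
	batching = 0;
	usage -= batch_nr_pages;
	batch_nr_pages = 0;
}

int main(void)
{
	uncharge_start();
	for (int i = 0; i < 30; i++)
		do_uncharge(1);             /* 30 pages, one res_counter update at the end */
	do_uncharge(512);                   /* a THP still goes direct */
	uncharge_end();
	printf("usage=%lu\n", usage);       /* 544 - 512 - 30 = 2 */
	return 0;
}
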
@@ -2598,10 +2687,9 @@ direct_uncharge:
 static struct mem_cgroup *
 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 {
-       int count;
-       struct page_cgroup *pc;
        struct mem_cgroup *mem = NULL;
-       int page_size = PAGE_SIZE;
+       unsigned int nr_pages = 1;
+       struct page_cgroup *pc;
 
        if (mem_cgroup_disabled())
                return NULL;
@@ -2610,11 +2698,9 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
                return NULL;
 
        if (PageTransHuge(page)) {
-               page_size <<= compound_order(page);
+               nr_pages <<= compound_order(page);
                VM_BUG_ON(!PageTransHuge(page));
        }
-
-       count = page_size >> PAGE_SHIFT;
        /*
         * Check if our page_cgroup is valid
         */
@@ -2647,7 +2733,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
                break;
        }
 
-       mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -count);
+       mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages);
 
        ClearPageCgroupUsed(pc);
        /*
@@ -2668,7 +2754,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
                mem_cgroup_get(mem);
        }
        if (!mem_cgroup_is_root(mem))
-               __do_uncharge(mem, ctype, page_size);
+               mem_cgroup_do_uncharge(mem, nr_pages, ctype);
 
        return mem;
 
@@ -2860,8 +2946,8 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
 int mem_cgroup_prepare_migration(struct page *page,
        struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
 {
-       struct page_cgroup *pc;
        struct mem_cgroup *mem = NULL;
+       struct page_cgroup *pc;
        enum charge_type ctype;
        int ret = 0;
 
@@ -2917,7 +3003,7 @@ int mem_cgroup_prepare_migration(struct page *page,
                return 0;
 
        *ptr = mem;
-       ret = __mem_cgroup_try_charge(NULL, gfp_mask, ptr, false, PAGE_SIZE);
+       ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
        css_put(&mem->css);/* drop extra refcnt */
        if (ret || *ptr == NULL) {
                if (PageAnon(page)) {
@@ -2944,7 +3030,7 @@ int mem_cgroup_prepare_migration(struct page *page,
                ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
        else
                ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-       __mem_cgroup_commit_charge(mem, page, pc, ctype, PAGE_SIZE);
+       __mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
        return ret;
 }
 
@@ -3478,13 +3564,13 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
 }
 
 
-static u64 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
-                               enum mem_cgroup_stat_index idx)
+static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem,
+                                              enum mem_cgroup_stat_index idx)
 {
        struct mem_cgroup *iter;
-       s64 val = 0;
+       long val = 0;
 
-       /* each per cpu's value can be minus.Then, use s64 */
+       /* Per-cpu values can be negative, use a signed accumulator */
        for_each_mem_cgroup_tree(iter, mem)
                val += mem_cgroup_read_stat(iter, idx);
 
@@ -3504,12 +3590,11 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
                        return res_counter_read_u64(&mem->memsw, RES_USAGE);
        }
 
-       val = mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE);
-       val += mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS);
+       val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE);
+       val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS);
 
        if (swap)
-               val += mem_cgroup_get_recursive_idx_stat(mem,
-                               MEM_CGROUP_STAT_SWAPOUT);
+               val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
 
        return val << PAGE_SHIFT;
 }
@@ -3729,9 +3814,9 @@ mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
        s->stat[MCS_RSS] += val * PAGE_SIZE;
        val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
        s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
-       val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
+       val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN);
        s->stat[MCS_PGPGIN] += val;
-       val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
+       val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT);
        s->stat[MCS_PGPGOUT] += val;
        if (do_swap_account) {
                val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
@@ -4598,8 +4683,7 @@ one_by_one:
                        batch_count = PRECHARGE_COUNT_AT_ONCE;
                        cond_resched();
                }
-               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
-                                             PAGE_SIZE);
+               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false);
                if (ret || !mem)
                        /* mem_cgroup_clear_mc() will do uncharge later */
                        return -ENOMEM;
@@ -4945,8 +5029,8 @@ retry:
                        if (isolate_lru_page(page))
                                goto put;
                        pc = lookup_page_cgroup(page);
-                       if (!mem_cgroup_move_account(page, pc,
-                                       mc.from, mc.to, false, PAGE_SIZE)) {
+                       if (!mem_cgroup_move_account(page, 1, pc,
+                                                    mc.from, mc.to, false)) {
                                mc.precharge--;
                                /* we uncharge from mc.from later. */
                                mc.moved_charge++;