Revert "memcg: get rid of percpu_charge_mutex lock"
index 95d6c256b54c461b8e3544355cbe16423390e05b..930de943727117043c0274f35cbe6e70d5eb8160 100644 (file)
@@ -35,7 +35,6 @@
 #include <linux/limits.h>
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
-#include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
@@ -205,6 +204,50 @@ struct mem_cgroup_eventfd_list {
 static void mem_cgroup_threshold(struct mem_cgroup *mem);
 static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
 
+enum {
+       SCAN_BY_LIMIT,
+       SCAN_BY_SYSTEM,
+       NR_SCAN_CONTEXT,
+       SCAN_BY_SHRINK, /* not recorded now */
+};
+
+enum {
+       SCAN,
+       SCAN_ANON,
+       SCAN_FILE,
+       ROTATE,
+       ROTATE_ANON,
+       ROTATE_FILE,
+       FREED,
+       FREED_ANON,
+       FREED_FILE,
+       ELAPSED,
+       NR_SCANSTATS,
+};
+
+struct scanstat {
+       spinlock_t      lock;
+       unsigned long   stats[NR_SCAN_CONTEXT][NR_SCANSTATS];
+       unsigned long   rootstats[NR_SCAN_CONTEXT][NR_SCANSTATS];
+};
+
+const char *scanstat_string[NR_SCANSTATS] = {
+       "scanned_pages",
+       "scanned_anon_pages",
+       "scanned_file_pages",
+       "rotated_pages",
+       "rotated_anon_pages",
+       "rotated_file_pages",
+       "freed_pages",
+       "freed_anon_pages",
+       "freed_file_pages",
+       "elapsed_ns",
+};
+#define SCANSTAT_WORD_LIMIT    "_by_limit"
+#define SCANSTAT_WORD_SYSTEM   "_by_system"
+#define SCANSTAT_WORD_HIERARCHY        "_under_hierarchy"
+
+
 /*
  * The memory controller data structure. The memory controller controls both
  * page cache and RSS per cgroup. We would eventually like to provide
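
Not part of the patch, but for illustration: a minimal, compilable userspace sketch of how the scanstat table defined above is indexed and how the exported key names are composed from scanstat_string[] plus the SCANSTAT_WORD_* suffixes. The demo_* names, the trimmed three-entry stat list and printf() (standing in for the cgroup cb->fill() callback) are stand-ins for this demo only, and the _under_hierarchy variants are left out.

#include <stdio.h>
#include <string.h>

/* Mirrors the patch's scan contexts; SCAN_BY_SHRINK sits past NR_SCAN_CONTEXT
 * on purpose, so shrink-time scans are never folded into the table. */
enum { SCAN_BY_LIMIT, SCAN_BY_SYSTEM, NR_SCAN_CONTEXT, SCAN_BY_SHRINK };

/* Trimmed stat list for the demo (the patch has ten entries). */
enum { SCAN, SCAN_ANON, SCAN_FILE, NR_DEMO_STATS };

static const char *demo_string[NR_DEMO_STATS] = {
        "scanned_pages", "scanned_anon_pages", "scanned_file_pages",
};

int main(void)
{
        /* Same shape as struct scanstat: one row per context, one column per stat. */
        unsigned long stats[NR_SCAN_CONTEXT][NR_DEMO_STATS] = { { 0 } };
        char name[64];
        int ctx, i;

        stats[SCAN_BY_LIMIT][SCAN] = 32;        /* pretend a limit-triggered scan ran */

        for (ctx = 0; ctx < NR_SCAN_CONTEXT; ctx++) {
                const char *suffix = (ctx == SCAN_BY_LIMIT) ? "_by_limit" : "_by_system";

                for (i = 0; i < NR_DEMO_STATS; i++) {
                        /* key name = stat name + context suffix, as in vmscan_stat */
                        strcpy(name, demo_string[i]);
                        strcat(name, suffix);
                        printf("%s %lu\n", name, stats[ctx][i]);
                }
        }
        return 0;
}
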
@@ -270,7 +313,8 @@ struct mem_cgroup {
 
        /* For oom notifier event fd */
        struct list_head oom_notify;
-
+       /* For recording LRU-scan statistics */
+       struct scanstat scanstat;
        /*
         * Should we move charges of a task when a task is moved into this
         * mem_cgroup ? And what type of charges should we move ?
@@ -1063,6 +1107,21 @@ void mem_cgroup_move_lists(struct page *page,
        mem_cgroup_add_lru_list(page, to);
 }
 
+/*
+ * Checks whether the given mem is the same as root_mem or lies in
+ * root_mem's hierarchy subtree.
+ */
+static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_mem,
+               struct mem_cgroup *mem)
+{
+       if (root_mem != mem) {
+               return (root_mem->use_hierarchy &&
+                       css_is_ancestor(&mem->css, &root_mem->css));
+       }
+
+       return true;
+}
+
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 {
        int ret;
@@ -1082,10 +1141,7 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
         * enabled in "curr" and "curr" is a child of "mem" in *cgroup*
         * hierarchy(even if use_hierarchy is disabled in "mem").
         */
-       if (mem->use_hierarchy)
-               ret = css_is_ancestor(&curr->css, &mem->css);
-       else
-               ret = (curr == mem);
+       ret = mem_cgroup_same_or_subtree(mem, curr);
        css_put(&curr->css);
        return ret;
 }
@@ -1324,10 +1380,9 @@ static bool mem_cgroup_under_move(struct mem_cgroup *mem)
        to = mc.to;
        if (!from)
                goto unlock;
-       if (from == mem || to == mem
-           || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css))
-           || (mem->use_hierarchy && css_is_ancestor(&to->css, &mem->css)))
-               ret = true;
+
+       ret = mem_cgroup_same_or_subtree(mem, from)
+               || mem_cgroup_same_or_subtree(mem, to);
 unlock:
        spin_unlock(&mc.lock);
        return ret;
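
Not from the patch, but as a small compilable model of the check that the new mem_cgroup_same_or_subtree() centralizes: "mem is root_mem itself, or root_mem has use_hierarchy set and is an ancestor of mem". The demo_cgroup type and the parent-chain walk standing in for css_is_ancestor() are simplified stand-ins, not the kernel data structures.

#include <stdbool.h>
#include <stdio.h>

struct demo_cgroup {
        struct demo_cgroup *parent;
        bool use_hierarchy;
};

/* Stand-in for css_is_ancestor(): walk the parent chain of "child". */
static bool demo_is_ancestor(struct demo_cgroup *child, struct demo_cgroup *root)
{
        for (; child; child = child->parent)
                if (child == root)
                        return true;
        return false;
}

/* Same shape as mem_cgroup_same_or_subtree() in the patch. */
static bool demo_same_or_subtree(struct demo_cgroup *root, struct demo_cgroup *mem)
{
        if (root != mem)
                return root->use_hierarchy && demo_is_ancestor(mem, root);
        return true;
}

int main(void)
{
        struct demo_cgroup root  = { .parent = NULL,  .use_hierarchy = true };
        struct demo_cgroup child = { .parent = &root, .use_hierarchy = true };
        struct demo_cgroup other = { .parent = NULL,  .use_hierarchy = true };

        printf("%d %d %d\n",
               demo_same_or_subtree(&root, &root),    /* 1: same group  */
               demo_same_or_subtree(&root, &child),   /* 1: in subtree  */
               demo_same_or_subtree(&root, &other));  /* 0: unrelated   */
        return 0;
}

The patch then reuses this one helper in task_in_mem_cgroup(), mem_cgroup_under_move(), memcg_oom_wake_function() and drain_all_stock(), in place of four open-coded use_hierarchy/css_is_ancestor() checks.
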
@@ -1623,6 +1678,44 @@ bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
 }
 #endif
 
+static void __mem_cgroup_record_scanstat(unsigned long *stats,
+                          struct memcg_scanrecord *rec)
+{
+
+       stats[SCAN] += rec->nr_scanned[0] + rec->nr_scanned[1];
+       stats[SCAN_ANON] += rec->nr_scanned[0];
+       stats[SCAN_FILE] += rec->nr_scanned[1];
+
+       stats[ROTATE] += rec->nr_rotated[0] + rec->nr_rotated[1];
+       stats[ROTATE_ANON] += rec->nr_rotated[0];
+       stats[ROTATE_FILE] += rec->nr_rotated[1];
+
+       stats[FREED] += rec->nr_freed[0] + rec->nr_freed[1];
+       stats[FREED_ANON] += rec->nr_freed[0];
+       stats[FREED_FILE] += rec->nr_freed[1];
+
+       stats[ELAPSED] += rec->elapsed;
+}
+
+static void mem_cgroup_record_scanstat(struct memcg_scanrecord *rec)
+{
+       struct mem_cgroup *mem;
+       int context = rec->context;
+
+       if (context >= NR_SCAN_CONTEXT)
+               return;
+
+       mem = rec->mem;
+       spin_lock(&mem->scanstat.lock);
+       __mem_cgroup_record_scanstat(mem->scanstat.stats[context], rec);
+       spin_unlock(&mem->scanstat.lock);
+
+       mem = rec->root;
+       spin_lock(&mem->scanstat.lock);
+       __mem_cgroup_record_scanstat(mem->scanstat.rootstats[context], rec);
+       spin_unlock(&mem->scanstat.lock);
+}
+
 /*
  * Scan the hierarchy if needed to reclaim memory. We remember the last child
  * we reclaimed from, so that we don't end up penalizing one child extensively
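
A compact, compilable sketch (not patch code) of the recording pattern above: each reclaim pass is folded once into the victim's stats[] and once into the hierarchy root's rootstats[], and a context of SCAN_BY_SHRINK (>= NR_SCAN_CONTEXT) is dropped on purpose. The spinlock, the full stat list and the real memcg_scanrecord layout are omitted; all demo_* types and names are simplified stand-ins.

#include <stdio.h>

enum { SCAN_BY_LIMIT, SCAN_BY_SYSTEM, NR_SCAN_CONTEXT, SCAN_BY_SHRINK };
enum { SCAN, SCAN_ANON, SCAN_FILE, NR_DEMO_STATS };

struct demo_memcg {
        unsigned long stats[NR_SCAN_CONTEXT][NR_DEMO_STATS];
        unsigned long rootstats[NR_SCAN_CONTEXT][NR_DEMO_STATS];
};

struct demo_scanrecord {
        struct demo_memcg *mem;         /* the victim that was scanned    */
        struct demo_memcg *root;        /* root of the reclaimed subtree  */
        int context;
        unsigned long nr_scanned[2];    /* [0] = anon, [1] = file */
};

static void demo_fold(unsigned long *stats, struct demo_scanrecord *rec)
{
        stats[SCAN]      += rec->nr_scanned[0] + rec->nr_scanned[1];
        stats[SCAN_ANON] += rec->nr_scanned[0];
        stats[SCAN_FILE] += rec->nr_scanned[1];
}

static void demo_record(struct demo_scanrecord *rec)
{
        if (rec->context >= NR_SCAN_CONTEXT)    /* SCAN_BY_SHRINK: not recorded */
                return;
        demo_fold(rec->mem->stats[rec->context], rec);          /* local view     */
        demo_fold(rec->root->rootstats[rec->context], rec);     /* hierarchy view */
}

int main(void)
{
        struct demo_memcg root = { { { 0 } } }, child = { { { 0 } } };
        struct demo_scanrecord rec = {
                .mem = &child, .root = &root, .context = SCAN_BY_LIMIT,
                .nr_scanned = { 3, 5 },
        };

        demo_record(&rec);
        printf("child scanned %lu, root (hierarchy) scanned %lu\n",
               child.stats[SCAN_BY_LIMIT][SCAN],
               root.rootstats[SCAN_BY_LIMIT][SCAN]);
        return 0;
}
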
@@ -1647,15 +1740,25 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
        bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
        bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
        bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
+       struct memcg_scanrecord rec;
        unsigned long excess;
-       unsigned long nr_scanned;
+       unsigned long scanned;
 
        excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
 
        /* If memsw_is_minimum==1, swap-out is of-no-use. */
-       if (!check_soft && root_mem->memsw_is_minimum)
+       if (!check_soft && !shrink && root_mem->memsw_is_minimum)
                noswap = true;
 
+       if (shrink)
+               rec.context = SCAN_BY_SHRINK;
+       else if (check_soft)
+               rec.context = SCAN_BY_SYSTEM;
+       else
+               rec.context = SCAN_BY_LIMIT;
+
+       rec.root = root_mem;
+
        while (1) {
                victim = mem_cgroup_select_victim(root_mem);
                if (victim == root_mem) {
@@ -1696,14 +1799,23 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                        css_put(&victim->css);
                        continue;
                }
+               rec.mem = victim;
+               rec.nr_scanned[0] = 0;
+               rec.nr_scanned[1] = 0;
+               rec.nr_rotated[0] = 0;
+               rec.nr_rotated[1] = 0;
+               rec.nr_freed[0] = 0;
+               rec.nr_freed[1] = 0;
+               rec.elapsed = 0;
                /* we use swappiness of local cgroup */
                if (check_soft) {
                        ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
-                               noswap, zone, &nr_scanned);
-                       *total_scanned += nr_scanned;
+                               noswap, zone, &rec, &scanned);
+                       *total_scanned += scanned;
                } else
                        ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
-                                               noswap);
+                                               noswap, &rec);
+               mem_cgroup_record_scanstat(&rec);
                css_put(&victim->css);
                /*
                 * At shrinking usage, we can't check we should stop here or
@@ -1725,7 +1837,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 /*
  * Check OOM-Killer is already running under our hierarchy.
  * If someone is running, return false.
- * Has to be called with memcg_oom_mutex
+ * Has to be called with memcg_oom_lock
  */
 static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
 {
@@ -1770,7 +1882,7 @@ done:
 }
 
 /*
- * Has to be called with memcg_oom_mutex
+ * Has to be called with memcg_oom_lock
  */
 static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
 {
@@ -1802,7 +1914,7 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem)
                atomic_add_unless(&iter->under_oom, -1, 0);
 }
 
-static DEFINE_MUTEX(memcg_oom_mutex);
+static DEFINE_SPINLOCK(memcg_oom_lock);
 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
 
 struct oom_wait_info {
@@ -1813,25 +1925,20 @@ struct oom_wait_info {
 static int memcg_oom_wake_function(wait_queue_t *wait,
        unsigned mode, int sync, void *arg)
 {
-       struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
+       struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg,
+                         *oom_wait_mem;
        struct oom_wait_info *oom_wait_info;
 
        oom_wait_info = container_of(wait, struct oom_wait_info, wait);
+       oom_wait_mem = oom_wait_info->mem;
 
-       if (oom_wait_info->mem == wake_mem)
-               goto wakeup;
-       /* if no hierarchy, no match */
-       if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
-               return 0;
        /*
         * Both of oom_wait_info->mem and wake_mem are stable under us.
         * Then we can use css_is_ancestor without taking care of RCU.
         */
-       if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
-           !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
+       if (!mem_cgroup_same_or_subtree(oom_wait_mem, wake_mem)
+                       && !mem_cgroup_same_or_subtree(wake_mem, oom_wait_mem))
                return 0;
-
-wakeup:
        return autoremove_wake_function(wait, mode, sync, arg);
 }
 
@@ -1864,7 +1971,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
        mem_cgroup_mark_under_oom(mem);
 
        /* At first, try to OOM lock hierarchy under mem.*/
-       mutex_lock(&memcg_oom_mutex);
+       spin_lock(&memcg_oom_lock);
        locked = mem_cgroup_oom_lock(mem);
        /*
         * Even if signal_pending(), we can't quit charge() loop without
@@ -1876,7 +1983,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
                need_to_kill = false;
        if (locked)
                mem_cgroup_oom_notify(mem);
-       mutex_unlock(&memcg_oom_mutex);
+       spin_unlock(&memcg_oom_lock);
 
        if (need_to_kill) {
                finish_wait(&memcg_oom_waitq, &owait.wait);
@@ -1885,11 +1992,11 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
                schedule();
                finish_wait(&memcg_oom_waitq, &owait.wait);
        }
-       mutex_lock(&memcg_oom_mutex);
+       spin_lock(&memcg_oom_lock);
        if (locked)
                mem_cgroup_oom_unlock(mem);
        memcg_wakeup_oom(mem);
-       mutex_unlock(&memcg_oom_mutex);
+       spin_unlock(&memcg_oom_lock);
 
        mem_cgroup_unmark_under_oom(mem);
 
@@ -2052,19 +2159,14 @@ static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
 }
 
 /*
- * Tries to drain stocked charges in other cpus. This function is asynchronous
- * and just put a work per cpu for draining localy on each cpu. Caller can
- * expects some charges will be back to res_counter later but cannot wait for
- * it.
+ * Drains all per-CPU charge caches for the given root_mem and the
+ * subtree of the hierarchy under it. The sync flag says whether we
+ * should block until the work is done.
  */
-static void drain_all_stock_async(struct mem_cgroup *root_mem)
+static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
 {
        int cpu, curcpu;
-       /*
-        * If someone calls draining, avoid adding more kworker runs.
-        */
-       if (!mutex_trylock(&percpu_charge_mutex))
-               return;
+
        /* Notify other cpus that system-wide "drain" is running */
        get_online_cpus();
        /*
@@ -2078,33 +2180,54 @@ static void drain_all_stock_async(struct mem_cgroup *root_mem)
                struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
                struct mem_cgroup *mem;
 
-               if (cpu == curcpu)
-                       continue;
-
                mem = stock->cached;
-               if (!mem)
+               if (!mem || !stock->nr_pages)
+                       continue;
+               if (!mem_cgroup_same_or_subtree(root_mem, mem))
                        continue;
-               if (mem != root_mem) {
-                       if (!root_mem->use_hierarchy)
-                               continue;
-                       /* check whether "mem" is under tree of "root_mem" */
-                       if (!css_is_ancestor(&mem->css, &root_mem->css))
-                               continue;
+               if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
+                       if (cpu == curcpu)
+                               drain_local_stock(&stock->work);
+                       else
+                               schedule_work_on(cpu, &stock->work);
                }
-               if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
-                       schedule_work_on(cpu, &stock->work);
        }
+
+       if (!sync)
+               goto out;
+
+       for_each_online_cpu(cpu) {
+               struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
+               if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+                       flush_work(&stock->work);
+       }
+out:
        put_online_cpus();
+}
+
+/*
+ * Tries to drain stocked charges on other cpus. This function is asynchronous
+ * and just puts a work item per cpu for draining locally on each cpu. Callers
+ * can expect some charges to be returned to the res_counter later but cannot
+ * wait for it.
+ */
+static void drain_all_stock_async(struct mem_cgroup *root_mem)
+{
+       /*
+        * If someone calls draining, avoid adding more kworker runs.
+        */
+       if (!mutex_trylock(&percpu_charge_mutex))
+               return;
+       drain_all_stock(root_mem, false);
        mutex_unlock(&percpu_charge_mutex);
-       /* We don't wait for flush_work */
 }
 
 /* This is a synchronous drain interface. */
-static void drain_all_stock_sync(void)
+static void drain_all_stock_sync(struct mem_cgroup *root_mem)
 {
        /* called when force_empty is called */
        mutex_lock(&percpu_charge_mutex);
-       schedule_on_each_cpu(drain_local_stock);
+       drain_all_stock(root_mem, true);
        mutex_unlock(&percpu_charge_mutex);
 }
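
A single-threaded, compilable model (not patch code) of the filtering that drain_all_stock() now does per cpu: skip empty caches, skip caches that do not belong to the target subtree, and only drain a cache whose FLUSHING flag was not already set. A plain bool stands in for test_and_set_bit(FLUSHING_CACHED_CHARGE, ...), a direct call stands in for schedule_work_on()/drain_local_stock(), and the sync path (the second flush_work() loop) is omitted because this model drains inline; all demo_* names are illustrative only.

#include <stdbool.h>
#include <stdio.h>

#define NR_DEMO_CPUS 4

/* Toy version of the per-cpu charge cache ("stock"). */
struct demo_stock {
        int cached_group;       /* which group's pages are cached; -1 = none */
        unsigned long nr_pages;
        bool flushing;
};

static struct demo_stock stocks[NR_DEMO_CPUS] = {
        { 0, 12, false }, { 1, 3, false }, { -1, 0, false }, { 0, 7, false },
};

static void demo_drain_local(int cpu)
{
        struct demo_stock *stock = &stocks[cpu];

        printf("cpu%d: draining %lu cached pages of group %d\n",
               cpu, stock->nr_pages, stock->cached_group);
        stock->nr_pages = 0;
        stock->cached_group = -1;
        stock->flushing = false;        /* work done, clear the flag */
}

/* Mirrors the per-cpu loop in drain_all_stock(): only non-empty caches that
 * belong to the target group (same_or_subtree() in the kernel) are flagged
 * and drained; everything else is skipped. */
static void demo_drain_all(int target_group)
{
        int cpu;

        for (cpu = 0; cpu < NR_DEMO_CPUS; cpu++) {
                struct demo_stock *stock = &stocks[cpu];

                if (stock->cached_group < 0 || !stock->nr_pages)
                        continue;
                if (stock->cached_group != target_group)
                        continue;
                if (!stock->flushing) {
                        stock->flushing = true;
                        demo_drain_local(cpu);
                }
        }
}

int main(void)
{
        demo_drain_all(0);      /* drains cpus 0 and 3 only */
        return 0;
}
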
 
@@ -2757,30 +2880,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                return 0;
        if (PageCompound(page))
                return 0;
-       /*
-        * Corner case handling. This is called from add_to_page_cache()
-        * in usual. But some FS (shmem) precharges this page before calling it
-        * and call add_to_page_cache() with GFP_NOWAIT.
-        *
-        * For GFP_NOWAIT case, the page may be pre-charged before calling
-        * add_to_page_cache(). (See shmem.c) check it here and avoid to call
-        * charge twice. (It works but has to pay a bit larger cost.)
-        * And when the page is SwapCache, it should take swap information
-        * into account. This is under lock_page() now.
-        */
-       if (!(gfp_mask & __GFP_WAIT)) {
-               struct page_cgroup *pc;
-
-               pc = lookup_page_cgroup(page);
-               if (!pc)
-                       return 0;
-               lock_page_cgroup(pc);
-               if (PageCgroupUsed(pc)) {
-                       unlock_page_cgroup(pc);
-                       return 0;
-               }
-               unlock_page_cgroup(pc);
-       }
 
        if (unlikely(!mm))
                mm = &init_mm;
@@ -3370,31 +3469,6 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
        cgroup_release_and_wakeup_rmdir(&mem->css);
 }
 
-/*
- * A call to try to shrink memory usage on charge failure at shmem's swapin.
- * Calling hierarchical_reclaim is not enough because we should update
- * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
- * Moreover considering hierarchy, we should reclaim from the mem_over_limit,
- * not from the memcg which this page would be charged to.
- * try_charge_swapin does all of these works properly.
- */
-int mem_cgroup_shmem_charge_fallback(struct page *page,
-                           struct mm_struct *mm,
-                           gfp_t gfp_mask)
-{
-       struct mem_cgroup *mem;
-       int ret;
-
-       if (mem_cgroup_disabled())
-               return 0;
-
-       ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
-       if (!ret)
-               mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
-
-       return ret;
-}
-
 #ifdef CONFIG_DEBUG_VM
 static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
 {
@@ -3753,7 +3827,7 @@ move_account:
                        goto out;
                /* This is for making all *used* pages to be on LRU. */
                lru_add_drain_all();
-               drain_all_stock_sync();
+               drain_all_stock_sync(mem);
                ret = 0;
                mem_cgroup_start_move(mem);
                for_each_node_state(node, N_HIGH_MEMORY) {
@@ -3792,14 +3866,18 @@ try_to_free:
        /* try to free all pages in this cgroup */
        shrink = 1;
        while (nr_retries && mem->res.usage > 0) {
+               struct memcg_scanrecord rec;
                int progress;
 
                if (signal_pending(current)) {
                        ret = -EINTR;
                        goto out;
                }
+               rec.context = SCAN_BY_SHRINK;
+               rec.mem = mem;
+               rec.root = mem;
                progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
-                                               false);
+                                               false, &rec);
                if (!progress) {
                        nr_retries--;
                        /* maybe some writeback is necessary */
@@ -4553,7 +4631,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
        if (!event)
                return -ENOMEM;
 
-       mutex_lock(&memcg_oom_mutex);
+       spin_lock(&memcg_oom_lock);
 
        event->eventfd = eventfd;
        list_add(&event->list, &memcg->oom_notify);
@@ -4561,7 +4639,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
        /* already in OOM ? */
        if (atomic_read(&memcg->under_oom))
                eventfd_signal(eventfd, 1);
-       mutex_unlock(&memcg_oom_mutex);
+       spin_unlock(&memcg_oom_lock);
 
        return 0;
 }
@@ -4575,7 +4653,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
 
        BUG_ON(type != _OOM_TYPE);
 
-       mutex_lock(&memcg_oom_mutex);
+       spin_lock(&memcg_oom_lock);
 
        list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
                if (ev->eventfd == eventfd) {
@@ -4584,7 +4662,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
                }
        }
 
-       mutex_unlock(&memcg_oom_mutex);
+       spin_unlock(&memcg_oom_lock);
 }
 
 static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
@@ -4643,6 +4721,54 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
 }
 #endif /* CONFIG_NUMA */
 
+static int mem_cgroup_vmscan_stat_read(struct cgroup *cgrp,
+                               struct cftype *cft,
+                               struct cgroup_map_cb *cb)
+{
+       struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
+       char string[64];
+       int i;
+
+       for (i = 0; i < NR_SCANSTATS; i++) {
+               strcpy(string, scanstat_string[i]);
+               strcat(string, SCANSTAT_WORD_LIMIT);
+               cb->fill(cb, string,  mem->scanstat.stats[SCAN_BY_LIMIT][i]);
+       }
+
+       for (i = 0; i < NR_SCANSTATS; i++) {
+               strcpy(string, scanstat_string[i]);
+               strcat(string, SCANSTAT_WORD_SYSTEM);
+               cb->fill(cb, string,  mem->scanstat.stats[SCAN_BY_SYSTEM][i]);
+       }
+
+       for (i = 0; i < NR_SCANSTATS; i++) {
+               strcpy(string, scanstat_string[i]);
+               strcat(string, SCANSTAT_WORD_LIMIT);
+               strcat(string, SCANSTAT_WORD_HIERARCHY);
+               cb->fill(cb, string,  mem->scanstat.rootstats[SCAN_BY_LIMIT][i]);
+       }
+       for (i = 0; i < NR_SCANSTATS; i++) {
+               strcpy(string, scanstat_string[i]);
+               strcat(string, SCANSTAT_WORD_SYSTEM);
+               strcat(string, SCANSTAT_WORD_HIERARCHY);
+               cb->fill(cb, string,  mem->scanstat.rootstats[SCAN_BY_SYSTEM][i]);
+       }
+       return 0;
+}
+
+static int mem_cgroup_reset_vmscan_stat(struct cgroup *cgrp,
+                               unsigned int event)
+{
+       struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
+
+       spin_lock(&mem->scanstat.lock);
+       memset(&mem->scanstat.stats, 0, sizeof(mem->scanstat.stats));
+       memset(&mem->scanstat.rootstats, 0, sizeof(mem->scanstat.rootstats));
+       spin_unlock(&mem->scanstat.lock);
+       return 0;
+}
+
+
 static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
@@ -4713,6 +4839,11 @@ static struct cftype mem_cgroup_files[] = {
                .mode = S_IRUGO,
        },
 #endif
+       {
+               .name = "vmscan_stat",
+               .read_map = mem_cgroup_vmscan_stat_read,
+               .trigger = mem_cgroup_reset_vmscan_stat,
+       },
 };
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
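
For reference (derived from the code above, not spelled out in the patch): reading memory.vmscan_stat yields the cross product of scanstat_string[] and the two suffix sets, e.g. scanned_pages_by_limit, freed_file_pages_by_system and elapsed_ns_by_limit_under_hierarchy, with the *_under_hierarchy keys coming from rootstats[]; because the cftype has a .trigger handler, any write to the file clears both tables via mem_cgroup_reset_vmscan_stat().
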
@@ -4976,6 +5107,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        atomic_set(&mem->refcnt, 1);
        mem->move_charge_at_immigrate = 0;
        mutex_init(&mem->thresholds_lock);
+       spin_lock_init(&mem->scanstat.lock);
        return &mem->css;
 free_out:
        __mem_cgroup_free(mem);
@@ -5156,15 +5288,17 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
                pgoff = pte_to_pgoff(ptent);
 
        /* page is moved even if it's not RSS of this task(page-faulted). */
-       if (!mapping_cap_swap_backed(mapping)) { /* normal file */
-               page = find_get_page(mapping, pgoff);
-       } else { /* shmem/tmpfs file. we should take account of swap too. */
-               swp_entry_t ent;
-               mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
+       page = find_get_page(mapping, pgoff);
+
+#ifdef CONFIG_SWAP
+       /* shmem/tmpfs may report page out on swap: account for that too. */
+       if (radix_tree_exceptional_entry(page)) {
+               swp_entry_t swap = radix_to_swp_entry(page);
                if (do_swap_account)
-                       entry->val = ent.val;
+                       *entry = swap;
+               page = find_get_page(&swapper_space, swap.val);
        }
-
+#endif
        return page;
 }
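
For context (an inference from this hunk and the shmem hooks removed earlier, not stated in the patch): with shmem_fs.h and mem_cgroup_get_shmem_target() gone, mc_handle_file_pte() relies on find_get_page() returning an exceptional radix-tree entry for a shmem/tmpfs page that has been swapped out; radix_to_swp_entry() converts that back into a swp_entry_t so the charge can still be moved, and the swap cache page, if present, is looked up in swapper_space.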