Merge branch 'for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ae2f0a8ab7617938c7922756eb1e9fab67142582..26c6f4ec20f43e626f0564776f1b38be8a54cd41 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -230,10 +230,30 @@ struct mem_cgroup {
         * the counter to account for memory usage
         */
        struct res_counter res;
-       /*
-        * the counter to account for mem+swap usage.
-        */
-       struct res_counter memsw;
+
+       union {
+               /*
+                * the counter to account for mem+swap usage.
+                */
+               struct res_counter memsw;
+
+               /*
+                * rcu_freeing is used only when freeing struct mem_cgroup,
+                * so put it into a union to avoid wasting more memory.
+                * It must be disjoint from the css field.  It could be
+                * in a union with the res field, but res plays a much
+                * larger part in mem_cgroup life than memsw, and might
+                * be of interest, even at time of free, when debugging.
+                * So share rcu_head with the less interesting memsw.
+                */
+               struct rcu_head rcu_freeing;
+               /*
+                * But when using vfree(), that cannot be done at
+                * interrupt time, so we must then queue the work.
+                */
+               struct work_struct work_freeing;
+       };
+
        /*
         * Per cgroup active and inactive list, similar to the
         * per zone LRU lists.
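
The union added above overlays three fields whose lifetimes never overlap: memsw matters only while the group is alive, rcu_freeing only once freeing has begun, and work_freeing only after the RCU callback has run. A reduced, hypothetical layout (not the real struct, field names kept for readability) makes the space argument explicit:

#include <linux/res_counter.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

/* hypothetical cut-down layout: the freeing machinery borrows memsw's space */
struct memcg_like {
	struct res_counter res;			/* still worth seeing while freeing */
	union {
		struct res_counter memsw;	/* only meaningful while the group is live */
		struct rcu_head rcu_freeing;	/* only used once freeing has begun */
		struct work_struct work_freeing;/* only used after the RCU callback runs */
	};
};

Because struct rcu_head and struct work_struct are both smaller than struct res_counter on common configurations, the union is no larger than memsw alone, so the deferred-free bookkeeping costs struct mem_cgroup no extra memory.
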
@@ -379,7 +399,7 @@ static void mem_cgroup_put(struct mem_cgroup *memcg);
 static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
 void sock_update_memcg(struct sock *sk)
 {
-       if (static_branch(&memcg_socket_limit_enabled)) {
+       if (mem_cgroup_sockets_enabled) {
                struct mem_cgroup *memcg;
 
                BUG_ON(!sk->sk_prot->proto_cgroup);
@@ -411,7 +431,7 @@ EXPORT_SYMBOL(sock_update_memcg);
 
 void sock_release_memcg(struct sock *sk)
 {
-       if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
                struct mem_cgroup *memcg;
                WARN_ON(!sk->sk_cgrp->memcg);
                memcg = sk->sk_cgrp->memcg;
@@ -776,7 +796,8 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
        /* threshold event is triggered in finer grain than soft limit */
        if (unlikely(mem_cgroup_event_ratelimit(memcg,
                                                MEM_CGROUP_TARGET_THRESH))) {
-               bool do_softlimit, do_numainfo;
+               bool do_softlimit;
+               bool do_numainfo __maybe_unused;
 
                do_softlimit = mem_cgroup_event_ratelimit(memcg,
                                                MEM_CGROUP_TARGET_SOFTLIMIT);
@@ -1041,6 +1062,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 
        pc = lookup_page_cgroup(page);
        memcg = pc->mem_cgroup;
+
+       /*
+        * Surreptitiously switch any uncharged page to root:
+        * an uncharged page off lru does nothing to secure
+        * its former mem_cgroup from sudden removal.
+        *
+        * Our caller holds lru_lock, and PageCgroupUsed is updated
+        * under page_cgroup lock: between them, they make all uses
+        * of pc->mem_cgroup safe.
+        */
+       if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+               pc->mem_cgroup = memcg = root_mem_cgroup;
+
        mz = page_cgroup_zoneinfo(memcg, page);
        /* compound_order() is stabilized through lru_lock */
        MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
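
The write above is safe because the caller holds zone->lru_lock and PageCgroupUsed only changes under the page_cgroup lock, so nothing can charge the page while its stale owner is being retargeted. A deliberately simplified, hypothetical sketch of the same rule, with invented names rather than kernel API:

struct owner { int id; };
extern struct owner root_owner;		/* permanent, like root_mem_cgroup */

struct desc {
	struct owner *owner;		/* for a used desc, changed under its own lock */
	bool used;
};

/* caller holds the list lock, mirroring zone->lru_lock above */
static struct owner *stable_owner(struct desc *d)
{
	/*
	 * An unused desc keeps no reference on its old owner, which may
	 * be freed at any time, so point it at the permanent root before
	 * anyone follows the pointer.
	 */
	if (!d->used && d->owner != &root_owner)
		d->owner = &root_owner;
	return d->owner;
}
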
@@ -2407,8 +2441,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                                       struct page *page,
                                       unsigned int nr_pages,
                                       struct page_cgroup *pc,
-                                      enum charge_type ctype)
+                                      enum charge_type ctype,
+                                      bool lrucare)
 {
+       struct zone *uninitialized_var(zone);
+       bool was_on_lru = false;
+
        lock_page_cgroup(pc);
        if (unlikely(PageCgroupUsed(pc))) {
                unlock_page_cgroup(pc);
@@ -2419,6 +2457,21 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
         * we don't need page_cgroup_lock about tail pages, because they are not
         * accessed by any other context at this point.
         */
+
+       /*
+        * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
+        * may already be on some other mem_cgroup's LRU.  Take care of it.
+        */
+       if (lrucare) {
+               zone = page_zone(page);
+               spin_lock_irq(&zone->lru_lock);
+               if (PageLRU(page)) {
+                       ClearPageLRU(page);
+                       del_page_from_lru_list(zone, page, page_lru(page));
+                       was_on_lru = true;
+               }
+       }
+
        pc->mem_cgroup = memcg;
        /*
         * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2442,9 +2495,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                break;
        }
 
+       if (lrucare) {
+               if (was_on_lru) {
+                       VM_BUG_ON(PageLRU(page));
+                       SetPageLRU(page);
+                       add_page_to_lru_list(zone, page, page_lru(page));
+               }
+               spin_unlock_irq(&zone->lru_lock);
+       }
+
        mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
        unlock_page_cgroup(pc);
-       WARN_ON_ONCE(PageLRU(page));
+
        /*
         * "charge_statistics" updated event counter. Then, check it.
         * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
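
The lrucare path folded into __mem_cgroup_commit_charge() above follows a fixed discipline: take the zone's lru_lock, pull the page off its LRU list if it is on one, rewrite pc->mem_cgroup, then put it back and drop the lock. Written out as a stand-alone sketch (the helper itself is hypothetical and assumes memcontrol.c's context; it uses the same calls as the hunk above, including spin_lock_irq(), so it also assumes the caller runs with interrupts enabled):

static void retarget_page_owner(struct zone *zone, struct page *page,
				struct page_cgroup *pc,
				struct mem_cgroup *new_memcg)
{
	bool was_on_lru = false;

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page)) {
		ClearPageLRU(page);
		del_page_from_lru_list(zone, page, page_lru(page));
		was_on_lru = true;
	}

	pc->mem_cgroup = new_memcg;	/* page is off every LRU list here */

	if (was_on_lru) {
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		add_page_to_lru_list(zone, page, page_lru(page));
	}
	spin_unlock_irq(&zone->lru_lock);
}

The owner is only rewritten while the page is off the LRU and the lock is held, which is what lets callers such as the swap-in commit and mem_cgroup_replace_page_cache() pass lrucare=true instead of going through the removed __mem_cgroup_commit_charge_lrucare() helper.
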
@@ -2642,7 +2704,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
        ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
        if (ret == -ENOMEM)
                return ret;
-       __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
+       __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
        return 0;
 }
 
@@ -2662,35 +2724,6 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                                        enum charge_type ctype);
 
-static void
-__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
-                                       enum charge_type ctype)
-{
-       struct page_cgroup *pc = lookup_page_cgroup(page);
-       struct zone *zone = page_zone(page);
-       unsigned long flags;
-       bool removed = false;
-
-       /*
-        * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
-        * is already on LRU. It means the page may on some other page_cgroup's
-        * LRU. Take care of it.
-        */
-       spin_lock_irqsave(&zone->lru_lock, flags);
-       if (PageLRU(page)) {
-               del_page_from_lru_list(zone, page, page_lru(page));
-               ClearPageLRU(page);
-               removed = true;
-       }
-       __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
-       if (removed) {
-               add_page_to_lru_list(zone, page, page_lru(page));
-               SetPageLRU(page);
-       }
-       spin_unlock_irqrestore(&zone->lru_lock, flags);
-       return;
-}
-
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
@@ -2768,13 +2801,16 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
                                        enum charge_type ctype)
 {
+       struct page_cgroup *pc;
+
        if (mem_cgroup_disabled())
                return;
        if (!memcg)
                return;
        cgroup_exclude_rmdir(&memcg->css);
 
-       __mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+       pc = lookup_page_cgroup(page);
+       __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
        /*
         * Now swap is on-memory. This means this page may be
         * counted both as mem and swap....double count.
@@ -3026,23 +3062,6 @@ void mem_cgroup_uncharge_end(void)
        batch->memcg = NULL;
 }
 
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
-       struct page_cgroup *pc;
-
-       if (mem_cgroup_disabled())
-               return;
-
-       pc = lookup_page_cgroup(newpage);
-       VM_BUG_ON(PageCgroupUsed(pc));
-       pc->mem_cgroup = root_mem_cgroup;
-}
-
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
@@ -3247,7 +3266,7 @@ int mem_cgroup_prepare_migration(struct page *page,
                ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
        else
                ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-       __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
+       __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
        return ret;
 }
 
@@ -3331,7 +3350,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
         * the newpage may be on LRU(or pagevec for LRU) already. We lock
         * LRU while we overwrite pc->mem_cgroup.
         */
-       __mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
+       __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -4413,6 +4432,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
         */
        BUG_ON(!thresholds);
 
+       if (!thresholds->primary)
+               goto unlock;
+
        usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
 
        /* Check if a threshold crossed before removing */
@@ -4461,7 +4483,7 @@ swap_buffers:
 
        /* To be sure that nobody uses thresholds */
        synchronize_rcu();
-
+unlock:
        mutex_unlock(&memcg->thresholds_lock);
 }
 
@@ -4775,6 +4797,27 @@ out_free:
        return NULL;
 }
 
+/*
+ * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
+ * but in process context.  The work_freeing structure is overlaid
+ * on the rcu_freeing structure, which itself is overlaid on memsw.
+ */
+static void vfree_work(struct work_struct *work)
+{
+       struct mem_cgroup *memcg;
+
+       memcg = container_of(work, struct mem_cgroup, work_freeing);
+       vfree(memcg);
+}
+static void vfree_rcu(struct rcu_head *rcu_head)
+{
+       struct mem_cgroup *memcg;
+
+       memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
+       INIT_WORK(&memcg->work_freeing, vfree_work);
+       schedule_work(&memcg->work_freeing);
+}
+
 /*
  * At destroying mem_cgroup, references from swap_cgroup can remain.
  * (scanning all at force_empty is too costly...)
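
The two helpers above implement a general pattern: RCU callbacks ordinarily run in softirq context, where vfree() is not allowed, so the RCU callback only queues a work item and the real vfree() happens later in process context. A self-contained, hypothetical sketch of the same chain for some other vmalloc()ed object (invented names, not part of this patch):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

struct big_thing {
	union {
		struct rcu_head rcu;		/* used while waiting out RCU readers */
		struct work_struct work;	/* reuses the same space afterwards */
	};
	/* ... large payload that forced vmalloc() in the first place ... */
};

static void big_thing_free_work(struct work_struct *work)
{
	struct big_thing *t = container_of(work, struct big_thing, work);

	vfree(t);			/* process context: vfree() is safe here */
}

static void big_thing_free_rcu(struct rcu_head *rcu)
{
	struct big_thing *t = container_of(rcu, struct big_thing, rcu);

	/* softirq context: cannot vfree() here, hand the object to a worker */
	INIT_WORK(&t->work, big_thing_free_work);
	schedule_work(&t->work);
}

static void big_thing_release(struct big_thing *t)
{
	call_rcu(&t->rcu, big_thing_free_rcu);
}

Objects small enough to come from the slab can skip the detour and use kfree_rcu() directly, which is exactly the split the next hunk makes in __mem_cgroup_free().
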
@@ -4798,9 +4841,9 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 
        free_percpu(memcg->stat);
        if (sizeof(struct mem_cgroup) < PAGE_SIZE)
-               kfree(memcg);
+               kfree_rcu(memcg, rcu_freeing);
        else
-               vfree(memcg);
+               call_rcu(&memcg->rcu_freeing, vfree_rcu);
 }
 
 static void mem_cgroup_get(struct mem_cgroup *memcg)