pid namespaces: add support for pid namespaces hierarchy
index cc68fe68a60ebdfd4a07dc4d5fa955978b14de73..ca38db223f8407857ab5dc99a355d8c5da5c4562 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
 #include <linux/mutex.h>
 #include <linux/mount.h>
 #include <linux/pagemap.h>
+#include <linux/proc_fs.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
+#include <linux/backing-dev.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/magic.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/sort.h>
+#include <linux/kmod.h>
+#include <linux/delayacct.h>
+#include <linux/cgroupstats.h>
+
 #include <asm/atomic.h>
 
+static DEFINE_MUTEX(cgroup_mutex);
+
 /* Generate an array of cgroup subsystem pointers */
 #define SUBSYS(_x) &_x ## _subsys,
 
@@ -81,6 +89,13 @@ struct cgroupfs_root {
 
        /* Hierarchy-specific flags */
        unsigned long flags;
+
+       /* The path to use for release notifications. No locking
+        * between setting and use - so if userspace updates this
+        * while child cgroups exist, you could miss a
+        * notification. We ensure that it's always a valid
+        * NUL-terminated string */
+       char release_agent_path[PATH_MAX];
 };
 
 
@@ -94,6 +109,7 @@ static struct cgroupfs_root rootnode;
 /* The list of hierarchy roots */
 
 static LIST_HEAD(roots);
+static int root_count;
 
 /* dummytop is a shorthand for the dummy hierarchy's top cgroup */
 #define dummytop (&rootnode.top_cgroup)
@@ -107,7 +123,13 @@ static int need_forkexit_callback;
 
 /* bits in struct cgroup flags field */
 enum {
+       /* Control Group is dead */
        CONT_REMOVED,
+       /* Control Group has previously had a child cgroup or a task,
+        * but no longer (only if CONT_NOTIFY_ON_RELEASE is set) */
+       CONT_RELEASABLE,
+       /* Control Group requires release notifications to userspace */
+       CONT_NOTIFY_ON_RELEASE,
 };
 
 /* convenient tests for these bits */
@@ -121,6 +143,19 @@ enum {
        ROOT_NOPREFIX, /* mounted subsystems have no named prefix */
 };
 
+inline int cgroup_is_releasable(const struct cgroup *cont)
+{
+       const int bits =
+               (1 << CONT_RELEASABLE) |
+               (1 << CONT_NOTIFY_ON_RELEASE);
+       return (cont->flags & bits) == bits;
+}
+
+inline int notify_on_release(const struct cgroup *cont)
+{
+       return test_bit(CONT_NOTIFY_ON_RELEASE, &cont->flags);
+}
+
 /*
  * for_each_subsys() allows you to iterate on each subsystem attached to
  * an active hierarchy
@@ -132,12 +167,57 @@ list_for_each_entry(_ss, &_root->subsys_list, sibling)
 #define for_each_root(_root) \
 list_for_each_entry(_root, &roots, root_list)
 
-/* Each task_struct has an embedded css_set, so the get/put
- * operation simply takes a reference count on all the cgroups
- * referenced by subsystems in this css_set. This can end up
- * multiple-counting some cgroups, but that's OK - the ref-count is
- * just a busy/not-busy indicator; ensuring that we only count each
- * cgroup once would require taking a global lock to ensure that no
+/* the list of cgroups eligible for automatic release. Protected by
+ * release_list_lock */
+static LIST_HEAD(release_list);
+static DEFINE_SPINLOCK(release_list_lock);
+static void cgroup_release_agent(struct work_struct *work);
+static DECLARE_WORK(release_agent_work, cgroup_release_agent);
+static void check_for_release(struct cgroup *cont);
+
+/* Link structure for associating css_set objects with cgroups */
+struct cg_cgroup_link {
+       /*
+        * List running through cg_cgroup_links associated with a
+        * cgroup, anchored on cgroup->css_sets
+        */
+       struct list_head cont_link_list;
+       /*
+        * List running through cg_cgroup_links pointing at a
+        * single css_set object, anchored on css_set->cg_links
+        */
+       struct list_head cg_link_list;
+       struct css_set *cg;
+};
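
Each cg_cgroup_link therefore associates exactly one css_set with one cgroup, and the pair of list heads implements the many-to-many relationship between the two: a cgroup walks its css_sets list to reach every css_set that references it (as cgroup_task_count() and the task iterators below do), while a css_set's cg_links list exists mainly so the links can be torn down in unlink_css_set() when the css_set is destroyed.
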
+
+/* The default css_set - used by init and its children prior to any
+ * hierarchies being mounted. It contains a pointer to the root state
+ * for each subsystem. Also used to anchor the list of css_sets. Not
+ * reference-counted, to improve performance when child cgroups
+ * haven't been created.
+ */
+
+static struct css_set init_css_set;
+static struct cg_cgroup_link init_css_set_link;
+
+/* css_set_lock protects the list of css_set objects, and the
+ * chain of tasks off each css_set.  Nests outside task->alloc_lock
+ * due to cgroup_iter_start() */
+static DEFINE_RWLOCK(css_set_lock);
+static int css_set_count;
+
+/* We don't maintain the lists running through each css_set to its
+ * task until after the first call to cgroup_iter_start(). This
+ * reduces the fork()/exit() overhead for people who have cgroups
+ * compiled into their kernel but not actually in use */
+static int use_task_css_set_links;
+
+/* When we create or destroy a css_set, the operation simply
+ * takes/releases a reference count on all the cgroups referenced
+ * by subsystems in this css_set. This can end up multiple-counting
+ * some cgroups, but that's OK - the ref-count is just a
+ * busy/not-busy indicator; ensuring that we only count each cgroup
+ * once would require taking a global lock to ensure that no
  * subsystems moved between hierarchies while we were doing so.
  *
  * Possible TODO: decide at boot time based on the number of
@@ -145,18 +225,260 @@ list_for_each_entry(_root, &roots, root_list)
  * it's better for performance to ref-count every subsystem, or to
  * take a global lock and only add one ref count to each hierarchy.
  */
-static void get_css_set(struct css_set *cg)
+
+/*
+ * unlink a css_set from the list and free it
+ */
+static void unlink_css_set(struct css_set *cg)
+{
+       write_lock(&css_set_lock);
+       list_del(&cg->list);
+       css_set_count--;
+       while (!list_empty(&cg->cg_links)) {
+               struct cg_cgroup_link *link;
+               link = list_entry(cg->cg_links.next,
+                                 struct cg_cgroup_link, cg_link_list);
+               list_del(&link->cg_link_list);
+               list_del(&link->cont_link_list);
+               kfree(link);
+       }
+       write_unlock(&css_set_lock);
+}
+
+static void __release_css_set(struct kref *k, int taskexit)
 {
        int i;
-       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
-               atomic_inc(&cg->subsys[i]->cgroup->count);
+       struct css_set *cg = container_of(k, struct css_set, ref);
+
+       unlink_css_set(cg);
+
+       rcu_read_lock();
+       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+               struct cgroup *cont = cg->subsys[i]->cgroup;
+               if (atomic_dec_and_test(&cont->count) &&
+                   notify_on_release(cont)) {
+                       if (taskexit)
+                               set_bit(CONT_RELEASABLE, &cont->flags);
+                       check_for_release(cont);
+               }
+       }
+       rcu_read_unlock();
+       kfree(cg);
+}
+
+static void release_css_set(struct kref *k)
+{
+       __release_css_set(k, 0);
+}
+
+static void release_css_set_taskexit(struct kref *k)
+{
+       __release_css_set(k, 1);
+}
+
+/*
+ * refcounted get/put for css_set objects
+ */
+static inline void get_css_set(struct css_set *cg)
+{
+       kref_get(&cg->ref);
+}
+
+static inline void put_css_set(struct css_set *cg)
+{
+       kref_put(&cg->ref, release_css_set);
 }
 
-static void put_css_set(struct css_set *cg)
+static inline void put_css_set_taskexit(struct css_set *cg)
+{
+       kref_put(&cg->ref, release_css_set_taskexit);
+}
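
As a minimal sketch (not literal patch code) of the lifetime pattern these helpers support, assuming a task tsk being moved into cgroup cont — attach_task() further down does the same with task_lock() held and PF_EXITING handled:

static void css_set_switch_sketch(struct task_struct *tsk,
				  struct cgroup *cont)
{
	struct css_set *oldcg = tsk->cgroups;
	struct css_set *newcg;

	/* find_css_set() (defined below) returns with a reference held */
	newcg = find_css_set(oldcg, cont);
	if (!newcg)
		return;

	rcu_assign_pointer(tsk->cgroups, newcg);

	/* Drop the old set's reference; the exit path uses
	 * put_css_set_taskexit() instead, so that an emptied
	 * notify_on_release cgroup is marked CONT_RELEASABLE. */
	put_css_set(oldcg);
}
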
+
+/*
+ * find_existing_css_set() is a helper for
+ * find_css_set(), and checks to see whether an existing
+ * css_set is suitable. This currently walks a linked-list for
+ * simplicity; a later patch will use a hash table for better
+ * performance
+ *
+ * oldcg: the cgroup group that we're using before the cgroup
+ * transition
+ *
+ * cont: the cgroup that we're moving into
+ *
+ * template: location in which to build the desired set of subsystem
+ * state objects for the new cgroup group
+ */
+
+static struct css_set *find_existing_css_set(
+       struct css_set *oldcg,
+       struct cgroup *cont,
+       struct cgroup_subsys_state *template[])
 {
        int i;
-       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
-               atomic_dec(&cg->subsys[i]->cgroup->count);
+       struct cgroupfs_root *root = cont->root;
+       struct list_head *l = &init_css_set.list;
+
+       /* Built the set of subsystem state objects that we want to
+        * see in the new css_set */
+       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+               if (root->subsys_bits & (1ull << i)) {
+                       /* Subsystem is in this hierarchy. So we want
+                        * the subsystem state from the new
+                        * cgroup */
+                       template[i] = cont->subsys[i];
+               } else {
+                       /* Subsystem is not in this hierarchy, so we
+                        * don't want to change the subsystem state */
+                       template[i] = oldcg->subsys[i];
+               }
+       }
+
+       /* Look through existing cgroup groups to find one to reuse */
+       do {
+               struct css_set *cg =
+                       list_entry(l, struct css_set, list);
+
+               if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) {
+                       /* All subsystems matched */
+                       return cg;
+               }
+               /* Try the next cgroup group */
+               l = l->next;
+       } while (l != &init_css_set.list);
+
+       /* No existing cgroup group matched */
+       return NULL;
+}
+
+/*
+ * allocate_cg_links() allocates "count" cg_cgroup_link structures
+ * and chains them on tmp through their cont_link_list fields. Returns 0 on
+ * success or a negative error
+ */
+
+static int allocate_cg_links(int count, struct list_head *tmp)
+{
+       struct cg_cgroup_link *link;
+       int i;
+       INIT_LIST_HEAD(tmp);
+       for (i = 0; i < count; i++) {
+               link = kmalloc(sizeof(*link), GFP_KERNEL);
+               if (!link) {
+                       while (!list_empty(tmp)) {
+                               link = list_entry(tmp->next,
+                                                 struct cg_cgroup_link,
+                                                 cont_link_list);
+                               list_del(&link->cont_link_list);
+                               kfree(link);
+                       }
+                       return -ENOMEM;
+               }
+               list_add(&link->cont_link_list, tmp);
+       }
+       return 0;
+}
+
+static void free_cg_links(struct list_head *tmp)
+{
+       while (!list_empty(tmp)) {
+               struct cg_cgroup_link *link;
+               link = list_entry(tmp->next,
+                                 struct cg_cgroup_link,
+                                 cont_link_list);
+               list_del(&link->cont_link_list);
+               kfree(link);
+       }
+}
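
Pre-allocating like this lets callers grab every cg_cgroup_link they might need with GFP_KERNEL before taking css_set_lock, then consume the links (or free the surplus) without ever allocating under the lock; find_css_set() below and cgroup_get_sb() both follow that pattern.
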
+
+/*
+ * find_css_set() takes an existing cgroup group and a
+ * cgroup object, and returns a css_set object that's
+ * equivalent to the old group, but with the given cgroup
+ * substituted into the appropriate hierarchy. Must be called with
+ * cgroup_mutex held
+ */
+
+static struct css_set *find_css_set(
+       struct css_set *oldcg, struct cgroup *cont)
+{
+       struct css_set *res;
+       struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
+       int i;
+
+       struct list_head tmp_cg_links;
+       struct cg_cgroup_link *link;
+
+       /* First see if we already have a cgroup group that matches
+        * the desired set */
+       write_lock(&css_set_lock);
+       res = find_existing_css_set(oldcg, cont, template);
+       if (res)
+               get_css_set(res);
+       write_unlock(&css_set_lock);
+
+       if (res)
+               return res;
+
+       res = kmalloc(sizeof(*res), GFP_KERNEL);
+       if (!res)
+               return NULL;
+
+       /* Allocate all the cg_cgroup_link objects that we'll need */
+       if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
+               kfree(res);
+               return NULL;
+       }
+
+       kref_init(&res->ref);
+       INIT_LIST_HEAD(&res->cg_links);
+       INIT_LIST_HEAD(&res->tasks);
+
+       /* Copy the set of subsystem state objects generated in
+        * find_existing_css_set() */
+       memcpy(res->subsys, template, sizeof(res->subsys));
+
+       write_lock(&css_set_lock);
+       /* Add reference counts and links from the new css_set. */
+       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+               struct cgroup *cont = res->subsys[i]->cgroup;
+               struct cgroup_subsys *ss = subsys[i];
+               atomic_inc(&cont->count);
+               /*
+                * We want to add a link once per cgroup, so we
+                * only do it for the first subsystem in each
+                * hierarchy
+                */
+               if (ss->root->subsys_list.next == &ss->sibling) {
+                       BUG_ON(list_empty(&tmp_cg_links));
+                       link = list_entry(tmp_cg_links.next,
+                                         struct cg_cgroup_link,
+                                         cont_link_list);
+                       list_del(&link->cont_link_list);
+                       list_add(&link->cont_link_list, &cont->css_sets);
+                       link->cg = res;
+                       list_add(&link->cg_link_list, &res->cg_links);
+               }
+       }
+       if (list_empty(&rootnode.subsys_list)) {
+               link = list_entry(tmp_cg_links.next,
+                                 struct cg_cgroup_link,
+                                 cont_link_list);
+               list_del(&link->cont_link_list);
+               list_add(&link->cont_link_list, &dummytop->css_sets);
+               link->cg = res;
+               list_add(&link->cg_link_list, &res->cg_links);
+       }
+
+       BUG_ON(!list_empty(&tmp_cg_links));
+
+       /* Link this cgroup group into the list */
+       list_add(&res->list, &init_css_set.list);
+       css_set_count++;
+       write_unlock(&css_set_lock);
+
+       return res;
 }
 
 /*
@@ -213,8 +535,6 @@ static void put_css_set(struct css_set *cg)
  * update of a tasks cgroup pointer by attach_task()
  */
 
-static DEFINE_MUTEX(cgroup_mutex);
-
 /**
  * cgroup_lock - lock out any changes to cgroup structures
  *
@@ -247,13 +567,15 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
 static int cgroup_populate_dir(struct cgroup *cont);
 static struct inode_operations cgroup_dir_inode_operations;
+static struct file_operations proc_cgroupstats_operations;
+
+static struct backing_dev_info cgroup_backing_dev_info = {
+       .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+};
 
 static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
 {
        struct inode *inode = new_inode(sb);
-       static struct backing_dev_info cgroup_backing_dev_info = {
-               .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
-       };
 
        if (inode) {
                inode->i_mode = mode;
@@ -272,6 +594,13 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
        if (S_ISDIR(inode->i_mode)) {
                struct cgroup *cont = dentry->d_fsdata;
                BUG_ON(!(cgroup_is_removed(cont)));
+               /* It's possible for external users to be holding css
+                * reference counts on a cgroup; css_put() needs to
+                * be able to access the cgroup after decrementing
+                * the reference count in order to know if it needs to
+                * queue the cgroup to be handled by the release
+                * agent */
+               synchronize_rcu();
                kfree(cont);
        }
        iput(inode);
@@ -403,6 +732,8 @@ static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
                seq_printf(seq, ",%s", ss->name);
        if (test_bit(ROOT_NOPREFIX, &root->flags))
                seq_puts(seq, ",noprefix");
+       if (strlen(root->release_agent_path))
+               seq_printf(seq, ",release_agent=%s", root->release_agent_path);
        mutex_unlock(&cgroup_mutex);
        return 0;
 }
@@ -410,6 +741,7 @@ static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
 struct cgroup_sb_opts {
        unsigned long subsys_bits;
        unsigned long flags;
+       char *release_agent;
 };
 
 /* Convert a hierarchy specifier into a bitmask of subsystems and
@@ -421,6 +753,7 @@ static int parse_cgroupfs_options(char *data,
 
        opts->subsys_bits = 0;
        opts->flags = 0;
+       opts->release_agent = NULL;
 
        while ((token = strsep(&o, ",")) != NULL) {
                if (!*token)
@@ -429,6 +762,15 @@ static int parse_cgroupfs_options(char *data,
                        opts->subsys_bits = (1 << CGROUP_SUBSYS_COUNT) - 1;
                } else if (!strcmp(token, "noprefix")) {
                        set_bit(ROOT_NOPREFIX, &opts->flags);
+               } else if (!strncmp(token, "release_agent=", 14)) {
+                       /* Specifying two release agents is forbidden */
+                       if (opts->release_agent)
+                               return -EINVAL;
+                       opts->release_agent = kzalloc(PATH_MAX, GFP_KERNEL);
+                       if (!opts->release_agent)
+                               return -ENOMEM;
+                       strncpy(opts->release_agent, token + 14, PATH_MAX - 1);
+                       opts->release_agent[PATH_MAX - 1] = 0;
                } else {
                        struct cgroup_subsys *ss;
                        int i;
@@ -478,7 +820,11 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
        if (!ret)
                cgroup_populate_dir(cont);
 
+       if (opts.release_agent)
+               strcpy(root->release_agent_path, opts.release_agent);
  out_unlock:
+       if (opts.release_agent)
+               kfree(opts.release_agent);
        mutex_unlock(&cgroup_mutex);
        mutex_unlock(&cont->dentry->d_inode->i_mutex);
        return ret;
@@ -501,6 +847,8 @@ static void init_cgroup_root(struct cgroupfs_root *root)
        cont->top_cgroup = cont;
        INIT_LIST_HEAD(&cont->sibling);
        INIT_LIST_HEAD(&cont->children);
+       INIT_LIST_HEAD(&cont->css_sets);
+       INIT_LIST_HEAD(&cont->release_list);
 }
 
 static int cgroup_test_super(struct super_block *sb, void *data)
@@ -570,11 +918,16 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
        int ret = 0;
        struct super_block *sb;
        struct cgroupfs_root *root;
+       struct list_head tmp_cg_links, *l;
+       INIT_LIST_HEAD(&tmp_cg_links);
 
        /* First find the desired set of subsystems */
        ret = parse_cgroupfs_options(data, &opts);
-       if (ret)
+       if (ret) {
+               if (opts.release_agent)
+                       kfree(opts.release_agent);
                return ret;
+       }
 
        root = kzalloc(sizeof(*root), GFP_KERNEL);
        if (!root)
@@ -583,6 +936,10 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
        init_cgroup_root(root);
        root->subsys_bits = opts.subsys_bits;
        root->flags = opts.flags;
+       if (opts.release_agent) {
+               strcpy(root->release_agent_path, opts.release_agent);
+               kfree(opts.release_agent);
+       }
 
        sb = sget(fs_type, cgroup_test_super, cgroup_set_super, root);
 
@@ -599,18 +956,36 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
        } else {
                /* New superblock */
                struct cgroup *cont = &root->top_cgroup;
+               struct inode *inode;
 
                BUG_ON(sb->s_root != NULL);
 
                ret = cgroup_get_rootdir(sb);
                if (ret)
                        goto drop_new_super;
+               inode = sb->s_root->d_inode;
 
+               mutex_lock(&inode->i_mutex);
                mutex_lock(&cgroup_mutex);
 
+               /*
+                * We're accessing css_set_count without locking
+                * css_set_lock here, but that's OK - it can only be
+                * increased by someone holding cgroup_lock, and
+                * that's us. The worst that can happen is that we
+                * have some link structures left over
+                */
+               ret = allocate_cg_links(css_set_count, &tmp_cg_links);
+               if (ret) {
+                       mutex_unlock(&cgroup_mutex);
+                       mutex_unlock(&inode->i_mutex);
+                       goto drop_new_super;
+               }
+
                ret = rebind_subsystems(root, root->subsys_bits);
                if (ret == -EBUSY) {
                        mutex_unlock(&cgroup_mutex);
+                       mutex_unlock(&inode->i_mutex);
                        goto drop_new_super;
                }
 
@@ -618,24 +993,40 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
                BUG_ON(ret);
 
                list_add(&root->root_list, &roots);
+               root_count++;
 
                sb->s_root->d_fsdata = &root->top_cgroup;
                root->top_cgroup.dentry = sb->s_root;
 
+               /* Link the top cgroup in this hierarchy into all
+                * the css_set objects */
+               write_lock(&css_set_lock);
+               l = &init_css_set.list;
+               do {
+                       struct css_set *cg;
+                       struct cg_cgroup_link *link;
+                       cg = list_entry(l, struct css_set, list);
+                       BUG_ON(list_empty(&tmp_cg_links));
+                       link = list_entry(tmp_cg_links.next,
+                                         struct cg_cgroup_link,
+                                         cont_link_list);
+                       list_del(&link->cont_link_list);
+                       link->cg = cg;
+                       list_add(&link->cont_link_list,
+                                &root->top_cgroup.css_sets);
+                       list_add(&link->cg_link_list, &cg->cg_links);
+                       l = l->next;
+               } while (l != &init_css_set.list);
+               write_unlock(&css_set_lock);
+
+               free_cg_links(&tmp_cg_links);
+
                BUG_ON(!list_empty(&cont->sibling));
                BUG_ON(!list_empty(&cont->children));
                BUG_ON(root->number_of_cgroups != 1);
 
-               /*
-                * I believe that it's safe to nest i_mutex inside
-                * cgroup_mutex in this case, since no-one else can
-                * be accessing this directory yet. But we still need
-                * to teach lockdep that this is the case - currently
-                * a cgroupfs remount triggers a lockdep warning
-                */
-               mutex_lock(&cont->dentry->d_inode->i_mutex);
                cgroup_populate_dir(cont);
-               mutex_unlock(&cont->dentry->d_inode->i_mutex);
+               mutex_unlock(&inode->i_mutex);
                mutex_unlock(&cgroup_mutex);
        }
 
@@ -644,6 +1035,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
  drop_new_super:
        up_write(&sb->s_umount);
        deactivate_super(sb);
+       free_cg_links(&tmp_cg_links);
        return ret;
 }
 
@@ -665,8 +1057,25 @@ static void cgroup_kill_sb(struct super_block *sb) {
        /* Shouldn't be able to fail ... */
        BUG_ON(ret);
 
-       if (!list_empty(&root->root_list))
+       /*
+        * Release all the links from css_sets to this hierarchy's
+        * root cgroup
+        */
+       write_lock(&css_set_lock);
+       while (!list_empty(&cont->css_sets)) {
+               struct cg_cgroup_link *link;
+               link = list_entry(cont->css_sets.next,
+                                 struct cg_cgroup_link, cont_link_list);
+               list_del(&link->cg_link_list);
+               list_del(&link->cont_link_list);
+               kfree(link);
+       }
+       write_unlock(&css_set_lock);
+
+       if (!list_empty(&root->root_list)) {
                list_del(&root->root_list);
+               root_count--;
+       }
        mutex_unlock(&cgroup_mutex);
 
        kfree(root);
@@ -759,9 +1168,9 @@ static int attach_task(struct cgroup *cont, struct task_struct *tsk)
        int retval = 0;
        struct cgroup_subsys *ss;
        struct cgroup *oldcont;
-       struct css_set *cg = &tsk->cgroups;
+       struct css_set *cg = tsk->cgroups;
+       struct css_set *newcg;
        struct cgroupfs_root *root = cont->root;
-       int i;
        int subsys_id;
 
        get_first_subsys(cont, NULL, &subsys_id);
@@ -780,33 +1189,40 @@ static int attach_task(struct cgroup *cont, struct task_struct *tsk)
                }
        }
 
+       /*
+        * Locate or allocate a new css_set for this task,
+        * based on its final set of cgroups
+        */
+       newcg = find_css_set(cg, cont);
+       if (!newcg) {
+               return -ENOMEM;
+       }
+
        task_lock(tsk);
        if (tsk->flags & PF_EXITING) {
                task_unlock(tsk);
+               put_css_set(newcg);
                return -ESRCH;
        }
-       /* Update the css_set pointers for the subsystems in this
-        * hierarchy */
-       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
-               if (root->subsys_bits & (1ull << i)) {
-                       /* Subsystem is in this hierarchy. So we want
-                        * the subsystem state from the new
-                        * cgroup. Transfer the refcount from the
-                        * old to the new */
-                       atomic_inc(&cont->count);
-                       atomic_dec(&cg->subsys[i]->cgroup->count);
-                       rcu_assign_pointer(cg->subsys[i], cont->subsys[i]);
-               }
-       }
+       rcu_assign_pointer(tsk->cgroups, newcg);
        task_unlock(tsk);
 
+       /* Update the css_set linked lists if we're using them */
+       write_lock(&css_set_lock);
+       if (!list_empty(&tsk->cg_list)) {
+               list_del(&tsk->cg_list);
+               list_add(&tsk->cg_list, &newcg->tasks);
+       }
+       write_unlock(&css_set_lock);
+
        for_each_subsys(root, ss) {
                if (ss->attach) {
                        ss->attach(ss, cont, oldcont, tsk);
                }
        }
-
+       set_bit(CONT_RELEASABLE, &oldcont->flags);
        synchronize_rcu();
+       put_css_set(cg);
        return 0;
 }
 
@@ -854,6 +1270,9 @@ enum cgroup_filetype {
        FILE_ROOT,
        FILE_DIR,
        FILE_TASKLIST,
+       FILE_NOTIFY_ON_RELEASE,
+       FILE_RELEASABLE,
+       FILE_RELEASE_AGENT,
 };
 
 static ssize_t cgroup_write_uint(struct cgroup *cont, struct cftype *cft,
@@ -924,6 +1343,32 @@ static ssize_t cgroup_common_file_write(struct cgroup *cont,
        case FILE_TASKLIST:
                retval = attach_task_by_pid(cont, buffer);
                break;
+       case FILE_NOTIFY_ON_RELEASE:
+               clear_bit(CONT_RELEASABLE, &cont->flags);
+               if (simple_strtoul(buffer, NULL, 10) != 0)
+                       set_bit(CONT_NOTIFY_ON_RELEASE, &cont->flags);
+               else
+                       clear_bit(CONT_NOTIFY_ON_RELEASE, &cont->flags);
+               break;
+       case FILE_RELEASE_AGENT:
+       {
+               struct cgroupfs_root *root = cont->root;
+               /* Strip trailing newline */
+               if (nbytes && (buffer[nbytes-1] == '\n')) {
+                       buffer[nbytes-1] = 0;
+               }
+               if (nbytes < sizeof(root->release_agent_path)) {
+                       /* We never write anything other than '\0'
+                        * into the last char of release_agent_path,
+                        * so it always remains a NUL-terminated
+                        * string */
+                       strncpy(root->release_agent_path, buffer, nbytes);
+                       root->release_agent_path[nbytes] = 0;
+               } else {
+                       retval = -ENOSPC;
+               }
+               break;
+       }
        default:
                retval = -EINVAL;
                goto out2;
@@ -965,6 +1410,49 @@ static ssize_t cgroup_read_uint(struct cgroup *cont, struct cftype *cft,
        return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
 }
 
+static ssize_t cgroup_common_file_read(struct cgroup *cont,
+                                         struct cftype *cft,
+                                         struct file *file,
+                                         char __user *buf,
+                                         size_t nbytes, loff_t *ppos)
+{
+       enum cgroup_filetype type = cft->private;
+       char *page;
+       ssize_t retval = 0;
+       char *s;
+
+       if (!(page = (char *)__get_free_page(GFP_KERNEL)))
+               return -ENOMEM;
+
+       s = page;
+
+       switch (type) {
+       case FILE_RELEASE_AGENT:
+       {
+               struct cgroupfs_root *root;
+               size_t n;
+               mutex_lock(&cgroup_mutex);
+               root = cont->root;
+               n = strnlen(root->release_agent_path,
+                           sizeof(root->release_agent_path));
+               n = min(n, (size_t) PAGE_SIZE);
+               strncpy(s, root->release_agent_path, n);
+               mutex_unlock(&cgroup_mutex);
+               s += n;
+               break;
+       }
+       default:
+               retval = -EINVAL;
+               goto out;
+       }
+       *s++ = '\n';
+
+       retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
+out:
+       free_page((unsigned long)page);
+       return retval;
+}
+
 static ssize_t cgroup_file_read(struct file *file, char __user *buf,
                                   size_t nbytes, loff_t *ppos)
 {
@@ -1066,7 +1554,7 @@ static int cgroup_create_file(struct dentry *dentry, int mode,
 
                /* start with the directory inode held, so that we can
                 * populate it without racing with another mkdir */
-               mutex_lock(&inode->i_mutex);
+               mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
        } else if (S_ISREG(mode)) {
                inode->i_size = 0;
                inode->i_fop = &cgroup_file_operations;
@@ -1145,27 +1633,101 @@ int cgroup_add_files(struct cgroup *cont,
        return 0;
 }
 
-/* Count the number of tasks in a cgroup. Could be made more
- * time-efficient but less space-efficient with more linked lists
- * running through each cgroup and the css_set structures that
- * referenced it. Must be called with tasklist_lock held for read or
- * write or in an rcu critical section.
- */
-int __cgroup_task_count(const struct cgroup *cont)
+/* Count the number of tasks in a cgroup. */
+
+int cgroup_task_count(const struct cgroup *cont)
 {
        int count = 0;
-       struct task_struct *g, *p;
-       struct cgroup_subsys_state *css;
-       int subsys_id;
-
-       get_first_subsys(cont, &css, &subsys_id);
-       do_each_thread(g, p) {
-               if (task_subsys_state(p, subsys_id) == css)
-                       count ++;
-       } while_each_thread(g, p);
+       struct list_head *l;
+
+       read_lock(&css_set_lock);
+       l = cont->css_sets.next;
+       while (l != &cont->css_sets) {
+               struct cg_cgroup_link *link =
+                       list_entry(l, struct cg_cgroup_link, cont_link_list);
+               count += atomic_read(&link->cg->ref.refcount);
+               l = l->next;
+       }
+       read_unlock(&css_set_lock);
        return count;
 }
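
This works because each task holds exactly one reference on its css_set (taken in cgroup_fork(), dropped in cgroup_exit()), so summing the reference counts of the css_sets linked to a cgroup tracks its task population; transient references held while a task is mid-attach, or the extra base reference left on init_css_set by cgroup_init_early(), can briefly inflate the figure.
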
 
+/*
+ * Advance a list_head iterator.  The iterator should be positioned at
+ * the start of a css_set
+ */
+static void cgroup_advance_iter(struct cgroup *cont,
+                                         struct cgroup_iter *it)
+{
+       struct list_head *l = it->cg_link;
+       struct cg_cgroup_link *link;
+       struct css_set *cg;
+
+       /* Advance to the next non-empty css_set */
+       do {
+               l = l->next;
+               if (l == &cont->css_sets) {
+                       it->cg_link = NULL;
+                       return;
+               }
+               link = list_entry(l, struct cg_cgroup_link, cont_link_list);
+               cg = link->cg;
+       } while (list_empty(&cg->tasks));
+       it->cg_link = l;
+       it->task = cg->tasks.next;
+}
+
+void cgroup_iter_start(struct cgroup *cont, struct cgroup_iter *it)
+{
+       /*
+        * The first time anyone tries to iterate across a cgroup,
+        * we need to enable the list linking each css_set to its
+        * tasks, and fix up all existing tasks.
+        */
+       if (!use_task_css_set_links) {
+               struct task_struct *p, *g;
+               write_lock(&css_set_lock);
+               use_task_css_set_links = 1;
+               do_each_thread(g, p) {
+                       task_lock(p);
+                       if (list_empty(&p->cg_list))
+                               list_add(&p->cg_list, &p->cgroups->tasks);
+                       task_unlock(p);
+               } while_each_thread(g, p);
+               write_unlock(&css_set_lock);
+       }
+       read_lock(&css_set_lock);
+       it->cg_link = &cont->css_sets;
+       cgroup_advance_iter(cont, it);
+}
+
+struct task_struct *cgroup_iter_next(struct cgroup *cont,
+                                       struct cgroup_iter *it)
+{
+       struct task_struct *res;
+       struct list_head *l = it->task;
+
+       /* If the iterator's cg_link is NULL, there are no remaining tasks */
+       if (!it->cg_link)
+               return NULL;
+       res = list_entry(l, struct task_struct, cg_list);
+       /* Advance iterator to find next entry */
+       l = l->next;
+       if (l == &res->cgroups->tasks) {
+               /* We reached the end of this task list - move on to
+                * the next cg_cgroup_link */
+               cgroup_advance_iter(cont, it);
+       } else {
+               it->task = l;
+       }
+       return res;
+}
+
+void cgroup_iter_end(struct cgroup *cont, struct cgroup_iter *it)
+{
+       read_unlock(&css_set_lock);
+}
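
A minimal usage sketch for the iterator API above (pid_array_load() and cgroupstats_build() below follow the same pattern); css_set_lock is read-held between cgroup_iter_start() and cgroup_iter_end(), so the loop body must not sleep:

static int count_running_sketch(struct cgroup *cont)
{
	struct cgroup_iter it;
	struct task_struct *tsk;
	int nr_running = 0;

	cgroup_iter_start(cont, &it);
	while ((tsk = cgroup_iter_next(cont, &it))) {
		if (tsk->state == TASK_RUNNING)
			nr_running++;
	}
	cgroup_iter_end(cont, &it);
	return nr_running;
}
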
+
 /*
  * Stuff for reading the 'tasks' file.
  *
@@ -1195,23 +1757,68 @@ struct ctr_struct {
 static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cont)
 {
        int n = 0;
-       struct task_struct *g, *p;
-       struct cgroup_subsys_state *css;
-       int subsys_id;
+       struct cgroup_iter it;
+       struct task_struct *tsk;
+       cgroup_iter_start(cont, &it);
+       while ((tsk = cgroup_iter_next(cont, &it))) {
+               if (unlikely(n == npids))
+                       break;
+               pidarray[n++] = pid_nr(task_pid(tsk));
+       }
+       cgroup_iter_end(cont, &it);
+       return n;
+}
+
+/**
+ * Build and fill cgroupstats so that taskstats can export it to user
+ * space.
+ *
+ * @stats: cgroupstats to fill information into
+ * @dentry: A dentry entry belonging to the cgroup for which stats have
+ * been requested.
+ */
+int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
+{
+       int ret = -EINVAL;
+       struct cgroup *cont;
+       struct cgroup_iter it;
+       struct task_struct *tsk;
+       /*
+        * Validate dentry by checking the superblock operations
+        */
+       if (dentry->d_sb->s_op != &cgroup_ops)
+                goto err;
 
-       get_first_subsys(cont, &css, &subsys_id);
+       ret = 0;
+       cont = dentry->d_fsdata;
        rcu_read_lock();
-       do_each_thread(g, p) {
-               if (task_subsys_state(p, subsys_id) == css) {
-                       pidarray[n++] = pid_nr(task_pid(p));
-                       if (unlikely(n == npids))
-                               goto array_full;
+
+       cgroup_iter_start(cont, &it);
+       while ((tsk = cgroup_iter_next(cont, &it))) {
+               switch (tsk->state) {
+               case TASK_RUNNING:
+                       stats->nr_running++;
+                       break;
+               case TASK_INTERRUPTIBLE:
+                       stats->nr_sleeping++;
+                       break;
+               case TASK_UNINTERRUPTIBLE:
+                       stats->nr_uninterruptible++;
+                       break;
+               case TASK_STOPPED:
+                       stats->nr_stopped++;
+                       break;
+               default:
+                       if (delayacct_is_task_waiting_on_io(tsk))
+                               stats->nr_io_wait++;
+                       break;
                }
-       } while_each_thread(g, p);
+       }
+       cgroup_iter_end(cont, &it);
 
-array_full:
        rcu_read_unlock();
-       return n;
+err:
+       return ret;
 }
 
 static int cmppid(const void *a, const void *b)
@@ -1316,16 +1923,49 @@ static int cgroup_tasks_release(struct inode *unused_inode,
        return 0;
 }
 
+static u64 cgroup_read_notify_on_release(struct cgroup *cont,
+                                           struct cftype *cft)
+{
+       return notify_on_release(cont);
+}
+
+static u64 cgroup_read_releasable(struct cgroup *cont, struct cftype *cft)
+{
+       return test_bit(CONT_RELEASABLE, &cont->flags);
+}
+
 /*
  * for the common functions, 'private' gives the type of file
  */
-static struct cftype cft_tasks = {
-       .name = "tasks",
-       .open = cgroup_tasks_open,
-       .read = cgroup_tasks_read,
+static struct cftype files[] = {
+       {
+               .name = "tasks",
+               .open = cgroup_tasks_open,
+               .read = cgroup_tasks_read,
+               .write = cgroup_common_file_write,
+               .release = cgroup_tasks_release,
+               .private = FILE_TASKLIST,
+       },
+
+       {
+               .name = "notify_on_release",
+               .read_uint = cgroup_read_notify_on_release,
+               .write = cgroup_common_file_write,
+               .private = FILE_NOTIFY_ON_RELEASE,
+       },
+
+       {
+               .name = "releasable",
+               .read_uint = cgroup_read_releasable,
+               .private = FILE_RELEASABLE,
+       }
+};
+
+static struct cftype cft_release_agent = {
+       .name = "release_agent",
+       .read = cgroup_common_file_read,
        .write = cgroup_common_file_write,
-       .release = cgroup_tasks_release,
-       .private = FILE_TASKLIST,
+       .private = FILE_RELEASE_AGENT,
 };
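
From userspace these control files are driven with ordinary writes. A sketch, assuming the hierarchy is mounted at /dev/cgroup and that /sbin/cgroup-release is the chosen agent (both paths are placeholders, not anything this patch mandates):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return;
	ret = write(fd, val, strlen(val));
	(void)ret;	/* errors ignored in this sketch */
	close(fd);
}

int main(void)
{
	/* release_agent exists only in the hierarchy's top cgroup */
	write_str("/dev/cgroup/release_agent", "/sbin/cgroup-release");
	/* request a callback when this child cgroup becomes empty */
	write_str("/dev/cgroup/mygroup/notify_on_release", "1");
	return 0;
}
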
 
 static int cgroup_populate_dir(struct cgroup *cont)
@@ -1336,10 +1976,15 @@ static int cgroup_populate_dir(struct cgroup *cont)
        /* First clear out any existing files */
        cgroup_clear_directory(cont->dentry);
 
-       err = cgroup_add_file(cont, NULL, &cft_tasks);
+       err = cgroup_add_files(cont, NULL, files, ARRAY_SIZE(files));
        if (err < 0)
                return err;
 
+       if (cont == cont->top_cgroup) {
+               if ((err = cgroup_add_file(cont, NULL, &cft_release_agent)) < 0)
+                       return err;
+       }
+
        for_each_subsys(cont->root, ss) {
                if (ss->populate && (err = ss->populate(ss, cont)) < 0)
                        return err;
@@ -1395,6 +2040,8 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
        cont->flags = 0;
        INIT_LIST_HEAD(&cont->sibling);
        INIT_LIST_HEAD(&cont->children);
+       INIT_LIST_HEAD(&cont->css_sets);
+       INIT_LIST_HEAD(&cont->release_list);
 
        cont->parent = parent;
        cont->root = parent->root;
@@ -1456,6 +2103,38 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        return cgroup_create(c_parent, dentry, mode | S_IFDIR);
 }
 
+static inline int cgroup_has_css_refs(struct cgroup *cont)
+{
+       /* Check the reference count on each subsystem. Since we
+        * already established that there are no tasks in the
+        * cgroup, if the css refcount is also 0, then there should
+        * be no outstanding references, so the subsystem is safe to
+        * destroy. We scan across all subsystems rather than using
+        * the per-hierarchy linked list of mounted subsystems since
+        * we can be called via check_for_release() with no
+        * synchronization other than RCU, and the subsystem linked
+        * list isn't RCU-safe */
+       int i;
+       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+               struct cgroup_subsys *ss = subsys[i];
+               struct cgroup_subsys_state *css;
+               /* Skip subsystems not in this hierarchy */
+               if (ss->root != cont->root)
+                       continue;
+               css = cont->subsys[ss->subsys_id];
+               /* When called from check_for_release() it's possible
+                * that by this point the cgroup has been removed
+                * and the css deleted. But a false-positive doesn't
+                * matter, since it can only happen if the cgroup
+                * has been deleted and hence no longer needs the
+                * release agent to be called anyway. */
+               if (css && atomic_read(&css->refcnt)) {
+                       return 1;
+               }
+       }
+       return 0;
+}
+
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
 {
        struct cgroup *cont = dentry->d_fsdata;
@@ -1464,7 +2143,6 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
        struct cgroup_subsys *ss;
        struct super_block *sb;
        struct cgroupfs_root *root;
-       int css_busy = 0;
 
        /* the vfs holds both inode->i_mutex already */
 
@@ -1482,20 +2160,7 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
        root = cont->root;
        sb = root->sb;
 
-       /* Check the reference count on each subsystem. Since we
-        * already established that there are no tasks in the
-        * cgroup, if the css refcount is also 0, then there should
-        * be no outstanding references, so the subsystem is safe to
-        * destroy */
-       for_each_subsys(root, ss) {
-               struct cgroup_subsys_state *css;
-               css = cont->subsys[ss->subsys_id];
-               if (atomic_read(&css->refcnt)) {
-                       css_busy = 1;
-                       break;
-               }
-       }
-       if (css_busy) {
+       if (cgroup_has_css_refs(cont)) {
                mutex_unlock(&cgroup_mutex);
                return -EBUSY;
        }
@@ -1505,7 +2170,11 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
                        ss->destroy(ss, cont);
        }
 
+       spin_lock(&release_list_lock);
        set_bit(CONT_REMOVED, &cont->flags);
+       if (!list_empty(&cont->release_list))
+               list_del(&cont->release_list);
+       spin_unlock(&release_list_lock);
        /* delete my sibling from parent->children */
        list_del(&cont->sibling);
        spin_lock(&cont->dentry->d_lock);
@@ -1517,6 +2186,9 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
        dput(d);
        root->number_of_cgroups--;
 
+       set_bit(CONT_RELEASABLE, &parent->flags);
+       check_for_release(parent);
+
        mutex_unlock(&cgroup_mutex);
        /* Drop the active superblock reference that we took when we
         * created the cgroup */
@@ -1526,8 +2198,8 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
 
 static void cgroup_init_subsys(struct cgroup_subsys *ss)
 {
-       struct task_struct *g, *p;
        struct cgroup_subsys_state *css;
+       struct list_head *l;
        printk(KERN_ERR "Initializing cgroup subsys %s\n", ss->name);
 
        /* Create the top cgroup state for this subsystem */
@@ -1537,26 +2209,32 @@ static void cgroup_init_subsys(struct cgroup_subsys *ss)
        BUG_ON(IS_ERR(css));
        init_cgroup_css(css, ss, dummytop);
 
-       /* Update all tasks to contain a subsys pointer to this state
-        * - since the subsystem is newly registered, all tasks are in
-        * the subsystem's top cgroup. */
+       /* Update all cgroup groups to contain a subsys
+        * pointer to this state - since the subsystem is
+        * newly registered, all tasks and hence all cgroup
+        * groups are in the subsystem's top cgroup. */
+       write_lock(&css_set_lock);
+       l = &init_css_set.list;
+       do {
+               struct css_set *cg =
+                       list_entry(l, struct css_set, list);
+               cg->subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
+               l = l->next;
+       } while (l != &init_css_set.list);
+       write_unlock(&css_set_lock);
 
        /* If this subsystem requested that it be notified with fork
         * events, we should send it one now for every process in the
         * system */
+       if (ss->fork) {
+               struct task_struct *g, *p;
 
-       read_lock(&tasklist_lock);
-       init_task.cgroups.subsys[ss->subsys_id] = css;
-       if (ss->fork)
-               ss->fork(ss, &init_task);
-
-       do_each_thread(g, p) {
-               printk(KERN_INFO "Setting task %p css to %p (%d)\n", css, p, p->pid);
-               p->cgroups.subsys[ss->subsys_id] = css;
-               if (ss->fork)
+               read_lock(&tasklist_lock);
+               do_each_thread(g, p) {
                        ss->fork(ss, p);
-       } while_each_thread(g, p);
-       read_unlock(&tasklist_lock);
+               } while_each_thread(g, p);
+               read_unlock(&tasklist_lock);
+       }
 
        need_forkexit_callback |= ss->fork || ss->exit;
 
@@ -1570,8 +2248,22 @@ static void cgroup_init_subsys(struct cgroup_subsys *ss)
 int __init cgroup_init_early(void)
 {
        int i;
+       kref_init(&init_css_set.ref);
+       kref_get(&init_css_set.ref);
+       INIT_LIST_HEAD(&init_css_set.list);
+       INIT_LIST_HEAD(&init_css_set.cg_links);
+       INIT_LIST_HEAD(&init_css_set.tasks);
+       css_set_count = 1;
        init_cgroup_root(&rootnode);
        list_add(&rootnode.root_list, &roots);
+       root_count = 1;
+       init_task.cgroups = &init_css_set;
+
+       init_css_set_link.cg = &init_css_set;
+       list_add(&init_css_set_link.cont_link_list,
+                &rootnode.top_cgroup.css_sets);
+       list_add(&init_css_set_link.cg_link_list,
+                &init_css_set.cg_links);
 
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
@@ -1600,6 +2292,11 @@ int __init cgroup_init(void)
 {
        int err;
        int i;
+       struct proc_dir_entry *entry;
+
+       err = bdi_init(&cgroup_backing_dev_info);
+       if (err)
+               return err;
 
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
@@ -1611,10 +2308,126 @@ int __init cgroup_init(void)
        if (err < 0)
                goto out;
 
+       entry = create_proc_entry("cgroups", 0, NULL);
+       if (entry)
+               entry->proc_fops = &proc_cgroupstats_operations;
+
 out:
+       if (err)
+               bdi_destroy(&cgroup_backing_dev_info);
+
        return err;
 }
 
+/*
+ * proc_cgroup_show()
+ *  - Print task's cgroup paths into seq_file, one line for each hierarchy
+ *  - Used for /proc/<pid>/cgroup.
+ *  - No need to task_lock(tsk) on this tsk->cgroups reference, as it
+ *    doesn't really matter if tsk->cgroups changes after we read it,
+ *    and we take cgroup_mutex, keeping attach_task() from changing it
+ *    anyway.  No need to check that tsk->cgroups != NULL, thanks to
+ *    the_top_cgroup_hack in cgroup_exit(), which resets an exiting
+ *    task's cgroups pointer to init_css_set.
+ */
+
+/* TODO: Use a proper seq_file iterator */
+static int proc_cgroup_show(struct seq_file *m, void *v)
+{
+       struct pid *pid;
+       struct task_struct *tsk;
+       char *buf;
+       int retval;
+       struct cgroupfs_root *root;
+
+       retval = -ENOMEM;
+       buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!buf)
+               goto out;
+
+       retval = -ESRCH;
+       pid = m->private;
+       tsk = get_pid_task(pid, PIDTYPE_PID);
+       if (!tsk)
+               goto out_free;
+
+       retval = 0;
+
+       mutex_lock(&cgroup_mutex);
+
+       for_each_root(root) {
+               struct cgroup_subsys *ss;
+               struct cgroup *cont;
+               int subsys_id;
+               int count = 0;
+
+               /* Skip this hierarchy if it has no active subsystems */
+               if (!root->actual_subsys_bits)
+                       continue;
+               for_each_subsys(root, ss)
+                       seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
+               seq_putc(m, ':');
+               get_first_subsys(&root->top_cgroup, NULL, &subsys_id);
+               cont = task_cgroup(tsk, subsys_id);
+               retval = cgroup_path(cont, buf, PAGE_SIZE);
+               if (retval < 0)
+                       goto out_unlock;
+               seq_puts(m, buf);
+               seq_putc(m, '\n');
+       }
+
+out_unlock:
+       mutex_unlock(&cgroup_mutex);
+       put_task_struct(tsk);
+out_free:
+       kfree(buf);
+out:
+       return retval;
+}
+
+static int cgroup_open(struct inode *inode, struct file *file)
+{
+       struct pid *pid = PROC_I(inode)->pid;
+       return single_open(file, proc_cgroup_show, pid);
+}
+
+struct file_operations proc_cgroup_operations = {
+       .open           = cgroup_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+/* Display information about each subsystem and each hierarchy */
+static int proc_cgroupstats_show(struct seq_file *m, void *v)
+{
+       int i;
+       struct cgroupfs_root *root;
+
+       seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\n");
+       mutex_lock(&cgroup_mutex);
+       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+               struct cgroup_subsys *ss = subsys[i];
+               seq_printf(m, "%s\t%lu\t%d\n",
+                          ss->name, ss->root->subsys_bits,
+                          ss->root->number_of_cgroups);
+       }
+       mutex_unlock(&cgroup_mutex);
+       return 0;
+}
+
+static int cgroupstats_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, proc_cgroupstats_show, 0);
+}
+
+static struct file_operations proc_cgroupstats_operations = {
+       .open = cgroupstats_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
 /**
  * cgroup_fork - attach newly forked task to its parents cgroup.
  * @tsk: pointer to task_struct of forking parent process.
@@ -1625,18 +2438,19 @@ out:
  * fork.c by dup_task_struct().  However, we ignore that copy, since
  * it was not made under the protection of RCU or cgroup_mutex, so
  * might no longer be a valid cgroup pointer.  attach_task() might
- * have already changed current->cgroup, allowing the previously
- * referenced cgroup to be removed and freed.
+ * have already changed current->cgroups, allowing the previously
+ * referenced cgroup group to be removed and freed.
  *
  * At the point that cgroup_fork() is called, 'current' is the parent
  * task, and the passed argument 'child' points to the child task.
  */
 void cgroup_fork(struct task_struct *child)
 {
-       rcu_read_lock();
-       child->cgroups = rcu_dereference(current->cgroups);
-       get_css_set(&child->cgroups);
-       rcu_read_unlock();
+       task_lock(current);
+       child->cgroups = current->cgroups;
+       get_css_set(child->cgroups);
+       task_unlock(current);
+       INIT_LIST_HEAD(&child->cg_list);
 }
 
 /**
@@ -1656,6 +2470,21 @@ void cgroup_fork_callbacks(struct task_struct *child)
        }
 }
 
+/**
+ * cgroup_post_fork - called on a new task after adding it to the
+ * task list. Adds the task to the list running through its css_set
+ * if necessary. Has to be after the task is visible on the task list
+ * in case we race with the first call to cgroup_iter_start() - to
+ * guarantee that the new task ends up on its list. */
+void cgroup_post_fork(struct task_struct *child)
+{
+       if (use_task_css_set_links) {
+               write_lock(&css_set_lock);
+               if (list_empty(&child->cg_list))
+                       list_add(&child->cg_list, &child->cgroups->tasks);
+               write_unlock(&css_set_lock);
+       }
+}
 /**
  * cgroup_exit - detach cgroup from exiting task
  * @tsk: pointer to task_struct of exiting process
@@ -1694,6 +2523,7 @@ void cgroup_fork_callbacks(struct task_struct *child)
 void cgroup_exit(struct task_struct *tsk, int run_callbacks)
 {
        int i;
+       struct css_set *cg;
 
        if (run_callbacks && need_forkexit_callback) {
                for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
@@ -1702,11 +2532,26 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
                                ss->exit(ss, tsk);
                }
        }
+
+       /*
+        * Unlink from the css_set task list if necessary.
+        * Optimistically check cg_list before taking
+        * css_set_lock
+        */
+       if (!list_empty(&tsk->cg_list)) {
+               write_lock(&css_set_lock);
+               if (!list_empty(&tsk->cg_list))
+                       list_del(&tsk->cg_list);
+               write_unlock(&css_set_lock);
+       }
+
        /* Reassign the task to the init_css_set. */
        task_lock(tsk);
-       put_css_set(&tsk->cgroups);
-       tsk->cgroups = init_task.cgroups;
+       cg = tsk->cgroups;
+       tsk->cgroups = &init_css_set;
        task_unlock(tsk);
+       if (cg)
+               put_css_set_taskexit(cg);
 }
 
 /**
@@ -1740,7 +2585,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
                mutex_unlock(&cgroup_mutex);
                return 0;
        }
-       cg = &tsk->cgroups;
+       cg = tsk->cgroups;
        parent = task_cgroup(tsk, subsys->subsys_id);
 
        snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "node_%d", tsk->pid);
@@ -1748,6 +2593,8 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
        /* Pin the hierarchy */
        atomic_inc(&parent->root->sb->s_active);
 
+       /* Keep the cgroup alive */
+       get_css_set(cg);
        mutex_unlock(&cgroup_mutex);
 
        /* Now do the VFS work to create a cgroup */
@@ -1791,6 +2638,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
            (parent != task_cgroup(tsk, subsys->subsys_id))) {
                /* Aargh, we raced ... */
                mutex_unlock(&inode->i_mutex);
+               put_css_set(cg);
 
                deactivate_super(parent->root->sb);
                /* The cgroup is still accessible in the VFS, but
@@ -1814,6 +2662,10 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
 
  out_release:
        mutex_unlock(&inode->i_mutex);
+
+       mutex_lock(&cgroup_mutex);
+       put_css_set(cg);
+       mutex_unlock(&cgroup_mutex);
        deactivate_super(parent->root->sb);
        return ret;
 }
@@ -1843,3 +2695,111 @@ int cgroup_is_descendant(const struct cgroup *cont)
        ret = (cont == target);
        return ret;
 }
+
+static void check_for_release(struct cgroup *cont)
+{
+       /* All of these checks rely on RCU to keep the cgroup
+        * structure alive */
+       if (cgroup_is_releasable(cont) && !atomic_read(&cont->count)
+           && list_empty(&cont->children) && !cgroup_has_css_refs(cont)) {
+               /* Control Group is currently removable. If it's not
+                * already queued for a userspace notification, queue
+                * it now */
+               int need_schedule_work = 0;
+               spin_lock(&release_list_lock);
+               if (!cgroup_is_removed(cont) &&
+                   list_empty(&cont->release_list)) {
+                       list_add(&cont->release_list, &release_list);
+                       need_schedule_work = 1;
+               }
+               spin_unlock(&release_list_lock);
+               if (need_schedule_work)
+                       schedule_work(&release_agent_work);
+       }
+}
+
+void __css_put(struct cgroup_subsys_state *css)
+{
+       struct cgroup *cont = css->cgroup;
+       rcu_read_lock();
+       if (atomic_dec_and_test(&css->refcnt) && notify_on_release(cont)) {
+               set_bit(CONT_RELEASABLE, &cont->flags);
+               check_for_release(cont);
+       }
+       rcu_read_unlock();
+}
+
+/*
+ * Notify userspace when a cgroup is released, by running the
+ * configured release agent with the name of the cgroup (path
+ * relative to the root of the cgroup filesystem) as the argument.
+ *
+ * Most likely, this user command will try to rmdir this cgroup.
+ *
+ * This races with the possibility that some other task will be
+ * attached to this cgroup before it is removed, or that some other
+ * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
+ * The presumed 'rmdir' will fail quietly if this cgroup is no longer
+ * unused, and this cgroup will be reprieved from its death sentence,
+ * to continue to serve a useful existence.  Next time it's released,
+ * we will get notified again, if it still has 'notify_on_release' set.
+ *
+ * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
+ * means only wait until the task is successfully execve()'d.  The
+ * separate release agent task is forked by call_usermodehelper(),
+ * then control in this thread returns here, without waiting for the
+ * release agent task.  We don't bother to wait because the caller of
+ * this routine has no use for the exit status of the release agent
+ * task, so no sense holding our caller up for that.
+ *
+ */
+
+static void cgroup_release_agent(struct work_struct *work)
+{
+       BUG_ON(work != &release_agent_work);
+       mutex_lock(&cgroup_mutex);
+       spin_lock(&release_list_lock);
+       while (!list_empty(&release_list)) {
+               char *argv[3], *envp[3];
+               int i;
+               char *pathbuf;
+               struct cgroup *cont = list_entry(release_list.next,
+                                                   struct cgroup,
+                                                   release_list);
+               list_del_init(&cont->release_list);
+               spin_unlock(&release_list_lock);
+               pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+               if (!pathbuf) {
+                       spin_lock(&release_list_lock);
+                       continue;
+               }
+
+               if (cgroup_path(cont, pathbuf, PAGE_SIZE) < 0) {
+                       kfree(pathbuf);
+                       spin_lock(&release_list_lock);
+                       continue;
+               }
+
+               i = 0;
+               argv[i++] = cont->root->release_agent_path;
+               argv[i++] = (char *)pathbuf;
+               argv[i] = NULL;
+
+               i = 0;
+               /* minimal command environment */
+               envp[i++] = "HOME=/";
+               envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+               envp[i] = NULL;
+
+               /* Drop the lock while we invoke the usermode helper,
+                * since the exec could involve hitting disk and hence
+                * be a slow process */
+               mutex_unlock(&cgroup_mutex);
+               call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
+               kfree(pathbuf);
+               mutex_lock(&cgroup_mutex);
+               spin_lock(&release_list_lock);
+       }
+       spin_unlock(&release_list_lock);
+       mutex_unlock(&cgroup_mutex);
+}
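
On the receiving end the agent is an ordinary program: the kernel execs it with the released cgroup's path (relative to the hierarchy root) as argv[1] and the minimal HOME/PATH environment above. A sketch of the usual "rmdir it" agent, again assuming a /dev/cgroup mount point:

#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char path[4096];

	if (argc < 2)
		return 1;
	/* argv[1] is the cgroup path relative to the mount point;
	 * a doubled '/' is harmless if it already starts with one */
	snprintf(path, sizeof(path), "/dev/cgroup/%s", argv[1]);
	return rmdir(path) ? 1 : 0;
}
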