Merge branch 'for-3.3/core' of git://git.kernel.dk/linux-block
index 27886935804966b8246c3495898cdaec1088b9f6..fa8f26309444d2cdda41cae813cf6f5a70f1de06 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -30,8 +30,10 @@ EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
 static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
                                                  struct cgroup *);
-static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
-static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
+static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
+                             struct cgroup_taskset *);
+static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
+                          struct cgroup_taskset *);
 static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
 static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 
@@ -44,8 +46,8 @@ static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
-       .can_attach_task = blkiocg_can_attach_task,
-       .attach_task = blkiocg_attach_task,
+       .can_attach = blkiocg_can_attach,
+       .attach = blkiocg_attach,
        .destroy = blkiocg_destroy,
        .populate = blkiocg_populate,
 #ifdef CONFIG_BLK_CGROUP
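For context: the 3.3 cgroup core dropped the per-task .can_attach_task/.attach_task hooks in favour of the set-based .can_attach/.attach, which receive the entire migration set as a struct cgroup_taskset (every thread for a threadgroup move, a single task otherwise), so a subsystem can veto or process the whole operation in one call. Below is a minimal sketch of the new registration shape, with purely illustrative names; a real subsystem also wires up .create, .destroy, and its subsys_id, as the hunk above shows for blkio.

static int example_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                              struct cgroup_taskset *tset);
static void example_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                           struct cgroup_taskset *tset);

struct cgroup_subsys example_subsys = {
        .name           = "example",            /* illustrative only */
        .can_attach     = example_can_attach,
        .attach         = example_attach,
};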
@@ -1626,30 +1628,39 @@ done:
  * of the main cic data structures.  For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+                             struct cgroup_taskset *tset)
 {
+       struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;
 
        /* task_lock() is needed to avoid races with exit_io_context() */
-       task_lock(tsk);
-       ioc = tsk->io_context;
-       if (ioc && atomic_read(&ioc->nr_tasks) > 1)
-               ret = -EINVAL;
-       task_unlock(tsk);
-
+       cgroup_taskset_for_each(task, cgrp, tset) {
+               task_lock(task);
+               ioc = task->io_context;
+               if (ioc && atomic_read(&ioc->nr_tasks) > 1)
+                       ret = -EINVAL;
+               task_unlock(task);
+               if (ret)
+                       break;
+       }
        return ret;
 }
 
-static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+                          struct cgroup_taskset *tset)
 {
+       struct task_struct *task;
        struct io_context *ioc;
 
-       /* we don't lose anything even if ioc allocation fails */
-       ioc = get_task_io_context(tsk, GFP_ATOMIC, NUMA_NO_NODE);
-       if (ioc) {
-               ioc_cgroup_changed(ioc);
-               put_io_context(ioc, NULL);
+       cgroup_taskset_for_each(task, cgrp, tset) {
+               /* we don't lose anything even if ioc allocation fails */
+               ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
+               if (ioc) {
+                       ioc_cgroup_changed(ioc);
+                       put_io_context(ioc, NULL);
+               }
        }
 }
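To make the iteration contract above concrete, here is a self-contained sketch, not part of the patch, of how a taskset-based can_attach enforces all-or-nothing semantics: returning an error for any task in the set aborts the entire migration, which is exactly what blkiocg_can_attach() relies on when it finds a shared io_context. The example_ name and the PF_KTHREAD policy are invented for illustration; cgroup_taskset_for_each() is the real 3.3 iterator, and passing cgrp as its second argument skips tasks already in the destination cgroup, as the code above does.

/* Illustrative only -- a hypothetical policy, not from this patch. */
static int example_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                              struct cgroup_taskset *tset)
{
        struct task_struct *task;

        cgroup_taskset_for_each(task, cgrp, tset) {
                /* rejecting one task vetoes the whole migration set */
                if (task->flags & PF_KTHREAD)
                        return -EINVAL;
        }
        return 0;
}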