Pileus Git - ~andy/linux/commitdiff
block: make unplug timer trace event correspond to the schedule() unplug
authorJens Axboe <jaxboe@fusionio.com>
Sat, 16 Apr 2011 11:51:05 +0000 (13:51 +0200)
committerJens Axboe <jaxboe@fusionio.com>
Sat, 16 Apr 2011 11:51:05 +0000 (13:51 +0200)
It's a pretty close match to what we had before - the timer triggering
would mean that nobody unplugged the plug in due time, in the new
scheme this matches very closely what the schedule() unplug now is.
It's essentially the difference between an explicit unplug (IO unplug)
or an implicit unplug (timer unplug, we scheduled with pending IO
queued).

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
block/blk-core.c
include/trace/events/block.h
kernel/trace/blktrace.c

index 3c81210725071ad4df0ac1eff8932ebc1dbb374f..78b7b0cb7216ec1c3a86545ad86ece5cd85a24da 100644 (file)
@@ -2662,17 +2662,23 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
        return !(rqa->q <= rqb->q);
 }
 
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidental big
+ * additional stack usage in driver dispatch, in places where the original
+ * plugger did not intend it.
+ */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
-                           bool force_kblockd)
+                           bool from_schedule)
 {
-       trace_block_unplug_io(q, depth);
-       __blk_run_queue(q, force_kblockd);
+       trace_block_unplug(q, depth, !from_schedule);
+       __blk_run_queue(q, from_schedule);
 
        if (q->unplugged_fn)
                q->unplugged_fn(q);
 }
 
-void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
        struct request_queue *q;
        unsigned long flags;
@@ -2707,7 +2713,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
                BUG_ON(!rq->q);
                if (rq->q != q) {
                        if (q) {
-                               queue_unplugged(q, depth, force_kblockd);
+                               queue_unplugged(q, depth, from_schedule);
                                spin_unlock(q->queue_lock);
                        }
                        q = rq->q;
@@ -2728,7 +2734,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
        }
 
        if (q) {
-               queue_unplugged(q, depth, force_kblockd);
+               queue_unplugged(q, depth, from_schedule);
                spin_unlock(q->queue_lock);
        }
 
index 006e60b58306555c87e7513e2cfe0a23ce452bf9..bf366547da252077cf168522a1ff22735a10d3a5 100644 (file)
@@ -401,9 +401,9 @@ TRACE_EVENT(block_plug,
 
 DECLARE_EVENT_CLASS(block_unplug,
 
-       TP_PROTO(struct request_queue *q, unsigned int depth),
+       TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-       TP_ARGS(q, depth),
+       TP_ARGS(q, depth, explicit),
 
        TP_STRUCT__entry(
                __field( int,           nr_rq                   )
@@ -419,18 +419,19 @@ DECLARE_EVENT_CLASS(block_unplug,
 );
 
 /**
- * block_unplug_io - release of operations requests in request queue
+ * block_unplug - release of operations requests in request queue
  * @q: request queue to unplug
  * @depth: number of requests just added to the queue
+ * @explicit: whether this was an explicit unplug, or one from schedule()
  *
  * Unplug request queue @q because device driver is scheduled to work
  * on elements in the request queue.
  */
-DEFINE_EVENT(block_unplug, block_unplug_io,
+DEFINE_EVENT(block_unplug, block_unplug,
 
-       TP_PROTO(struct request_queue *q, unsigned int depth),
+       TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-       TP_ARGS(q, depth)
+       TP_ARGS(q, depth, explicit)
 );
 
 /**
index 3e3970d53d144609af37e47b634faf77692f5396..6957aa298dfa45581b45832b7251a2ee4029000e 100644 (file)
@@ -850,16 +850,21 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
                __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q,
-                                   unsigned int depth)
+static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
+                                   unsigned int depth, bool explicit)
 {
        struct blk_trace *bt = q->blk_trace;
 
        if (bt) {
                __be64 rpdu = cpu_to_be64(depth);
+               u32 what;
 
-               __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
-                               sizeof(rpdu), &rpdu);
+               if (explicit)
+                       what = BLK_TA_UNPLUG_IO;
+               else
+                       what = BLK_TA_UNPLUG_TIMER;
+
+               __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
        }
 }
 
@@ -1002,7 +1007,7 @@ static void blk_register_tracepoints(void)
        WARN_ON(ret);
        ret = register_trace_block_plug(blk_add_trace_plug, NULL);
        WARN_ON(ret);
-       ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+       ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
        WARN_ON(ret);
        ret = register_trace_block_split(blk_add_trace_split, NULL);
        WARN_ON(ret);
@@ -1017,7 +1022,7 @@ static void blk_unregister_tracepoints(void)
        unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
        unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
        unregister_trace_block_split(blk_add_trace_split, NULL);
-       unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+       unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
        unregister_trace_block_plug(blk_add_trace_plug, NULL);
        unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
        unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
@@ -1332,6 +1337,7 @@ static const struct {
        [__BLK_TA_COMPLETE]     = {{  "C", "complete" },   blk_log_with_error },
        [__BLK_TA_PLUG]         = {{  "P", "plug" },       blk_log_plug },
        [__BLK_TA_UNPLUG_IO]    = {{  "U", "unplug_io" },  blk_log_unplug },
+       [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
        [__BLK_TA_INSERT]       = {{  "I", "insert" },     blk_log_generic },
        [__BLK_TA_SPLIT]        = {{  "X", "split" },      blk_log_split },
        [__BLK_TA_BOUNCE]       = {{  "B", "bounce" },     blk_log_generic },