drbd: factor out master_bio completion and drbd_request destruction paths
author    Lars Ellenberg <lars.ellenberg@linbit.com>
          Mon, 26 Mar 2012 15:02:45 +0000 (17:02 +0200)
committer Philipp Reisner <philipp.reisner@linbit.com>
          Thu, 8 Nov 2012 15:58:23 +0000 (16:58 +0100)
In preparation for multiple connections and reference counting,
separate the code paths for completion of the master bio
and destruction of the request object.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
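
The shape of the split, as a minimal standalone C sketch (stand-in
types, flags and helpers with hypothetical names; the real code is in
the diff below): destruction of the request object becomes its own
check, and the completion path merely delegates to it after dealing
with the master bio.

/*
 * Sketch only: req_stub, stub_may_be_done and stub_may_be_completed
 * are illustrative stand-ins, not DRBD identifiers.
 */
#include <stdlib.h>

#define RQ_LOCAL_PENDING	(1UL << 0)	/* local disk I/O in flight */
#define RQ_NET_MASK		(1UL << 1)	/* request has a network part */
#define RQ_NET_DONE		(1UL << 2)	/* network part finished */

struct bio;				/* opaque upper-layer bio */

struct req_stub {
	struct bio *master_bio;		/* non-NULL until completed upwards */
	unsigned long rq_state;		/* RQ_* state flags */
};

/* Destruction path: free the request object, but only once the master
 * bio has been completed and both the local and the network parts of
 * the request are finished. */
static void stub_may_be_done(struct req_stub *req)
{
	const unsigned long s = req->rq_state;

	if (req->master_bio)		/* not yet completed upwards */
		return;
	if (s & RQ_LOCAL_PENDING)	/* local disk I/O still pending */
		return;
	if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE))
		free(req);		/* local-only, or network part done */
}

/* Completion path: report success or failure to the upper layers by
 * completing the master bio, then ask whether the object may go away. */
static void stub_may_be_completed(struct req_stub *req)
{
	if (req->master_bio) {
		/* bio_endio(req->master_bio, ...) in the real code */
		req->master_bio = NULL;
	}
	stub_may_be_done(req);
}

With a reference-counted request, the final put can then route through
the destruction check alone; in this patch the barrier-ack path below
already calls req_may_be_done() directly, while all other sites go
through req_may_be_completed()/req_may_be_completed_not_susp().
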
drivers/block/drbd/drbd_req.c

diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index c0326f54d3ab92883ff8e76937075cac348c5473..f6a785b239a4613befcceda8f2f6564131969bc5 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -222,13 +222,44 @@ static void maybe_wakeup_conflicting_requests(struct drbd_request *req)
                wake_up(&req->w.mdev->misc_wait);
 }
 
+static
+void req_may_be_done(struct drbd_request *req)
+{
+       const unsigned long s = req->rq_state;
+       struct drbd_conf *mdev = req->w.mdev;
+       int rw = req->rq_state & RQ_WRITE ? WRITE : READ;
+
+       /* req->master_bio still present means: Not yet completed.
+        *
+        * Unless this is RQ_POSTPONED, which will cause _req_is_done() to
+        * queue it on the retry workqueue instead of destroying it.
+        */
+       if (req->master_bio && !(s & RQ_POSTPONED))
+               return;
+
+       /* Local still pending, even though master_bio is already completed?
+        * may happen for RQ_LOCAL_ABORTED requests. */
+       if (s & RQ_LOCAL_PENDING)
+               return;
+
+       if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
+               /* this is disconnected (local only) operation,
+                * or protocol A, B, or C P_BARRIER_ACK,
+                * or killed from the transfer log due to connection loss. */
+               _req_is_done(mdev, req, rw);
+       }
+       /* else: network part and not DONE yet. that is
+        * protocol A, B, or C, barrier ack still pending... */
+}
+
 /* Helper for __req_mod().
  * Set m->bio to the master bio, if it is fit to be completed,
  * or leave it alone (it is initialized to NULL in __req_mod),
  * if it has already been completed, or cannot be completed yet.
  * If m->bio is set, the error status to be returned is placed in m->error.
  */
-void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
+static
+void req_may_be_completed(struct drbd_request *req, struct bio_and_error *m)
 {
        const unsigned long s = req->rq_state;
        struct drbd_conf *mdev = req->w.mdev;
@@ -309,26 +340,15 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
                        D_ASSERT(s & RQ_NET_DONE);
                }
        }
-
-       if (s & RQ_LOCAL_PENDING)
-               return;
-
-       if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
-               /* this is disconnected (local only) operation,
-                * or protocol A, B, or C P_BARRIER_ACK,
-                * or killed from the transfer log due to connection loss. */
-               _req_is_done(mdev, req, rw);
-       }
-       /* else: network part and not DONE yet. that is
-        * protocol A, B, or C, barrier ack still pending... */
+       req_may_be_done(req);
 }
 
-static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
+static void req_may_be_completed_not_susp(struct drbd_request *req, struct bio_and_error *m)
 {
        struct drbd_conf *mdev = req->w.mdev;
 
        if (!drbd_suspended(mdev))
-               _req_may_be_done(req, m);
+               req_may_be_completed(req, m);
 }
 
 /* obviously this could be coded as many single functions
@@ -395,14 +415,14 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                req->rq_state &= ~RQ_LOCAL_PENDING;
 
                maybe_wakeup_conflicting_requests(req);
-               _req_may_be_done_not_susp(req, m);
+               req_may_be_completed_not_susp(req, m);
                put_ldev(mdev);
                break;
 
        case ABORT_DISK_IO:
                req->rq_state |= RQ_LOCAL_ABORTED;
                if (req->rq_state & RQ_WRITE)
-                       _req_may_be_done_not_susp(req, m);
+                       req_may_be_completed_not_susp(req, m);
                else
                        goto goto_queue_for_net_read;
                break;
@@ -413,7 +433,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
                __drbd_chk_io_error(mdev, false);
                maybe_wakeup_conflicting_requests(req);
-               _req_may_be_done_not_susp(req, m);
+               req_may_be_completed_not_susp(req, m);
                put_ldev(mdev);
                break;
 
@@ -421,7 +441,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                /* it is legal to fail READA */
                req->rq_state |= RQ_LOCAL_COMPLETED;
                req->rq_state &= ~RQ_LOCAL_PENDING;
-               _req_may_be_done_not_susp(req, m);
+               req_may_be_completed_not_susp(req, m);
                put_ldev(mdev);
                break;
 
@@ -441,7 +461,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                /* no point in retrying if there is no good remote data,
                 * or we have no connection. */
                if (mdev->state.pdsk != D_UP_TO_DATE) {
-                       _req_may_be_done_not_susp(req, m);
+                       req_may_be_completed_not_susp(req, m);
                        break;
                }
 
@@ -458,8 +478,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                /* from __drbd_make_request
                 * or from bio_endio during read io-error recovery */
 
-               /* so we can verify the handle in the answer packet
-                * corresponding hlist_del is in _req_may_be_done() */
+               /* So we can verify the handle in the answer packet.
+                * Corresponding drbd_remove_request_interval is in
+                * req_may_be_completed() */
                D_ASSERT(drbd_interval_empty(&req->i));
                drbd_insert_interval(&mdev->read_requests, &req->i);
 
@@ -477,7 +498,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                /* assert something? */
                /* from __drbd_make_request only */
 
-               /* corresponding hlist_del is in _req_may_be_done() */
+               /* Corresponding drbd_remove_request_interval is in
+                * req_may_be_completed() */
                D_ASSERT(drbd_interval_empty(&req->i));
                drbd_insert_interval(&mdev->write_requests, &req->i);
 
@@ -539,7 +561,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                req->rq_state &= ~RQ_NET_QUEUED;
                /* if we did it right, tl_clear should be scheduled only after
                 * this, so this should not be necessary! */
-               _req_may_be_done_not_susp(req, m);
+               req_may_be_completed_not_susp(req, m);
                break;
 
        case HANDED_OVER_TO_NETWORK:
@@ -562,7 +584,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                }
                req->rq_state &= ~RQ_NET_QUEUED;
                req->rq_state |= RQ_NET_SENT;
-               _req_may_be_done_not_susp(req, m);
+               req_may_be_completed_not_susp(req, m);
                break;
 
        case OOS_HANDED_TO_NETWORK:
@@ -570,7 +592,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                 * as far as this connection is concerned. */
                req->rq_state &= ~RQ_NET_QUEUED;
                req->rq_state |= RQ_NET_DONE;
-               _req_may_be_done_not_susp(req, m);
+               req_may_be_completed_not_susp(req, m);
                break;
 
        case CONNECTION_LOST_WHILE_PENDING:
@@ -591,7 +613,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                if (!(req->rq_state & RQ_NET_QUEUED)) {
                        if (p)
                                goto goto_read_retry_local;
-                       _req_may_be_done(req, m); /* Allowed while state.susp */
+                       req_may_be_completed(req, m); /* Allowed while state.susp */
                }
                break;
 
@@ -624,7 +646,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
                req->rq_state &= ~RQ_NET_PENDING;
                maybe_wakeup_conflicting_requests(req);
-               _req_may_be_done_not_susp(req, m);
+               req_may_be_completed_not_susp(req, m);
                break;
 
        case POSTPONE_WRITE:
@@ -636,7 +658,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                D_ASSERT(req->rq_state & RQ_NET_PENDING);
                req->rq_state |= RQ_POSTPONED;
                maybe_wakeup_conflicting_requests(req);
-               _req_may_be_done_not_susp(req, m);
+               req_may_be_completed_not_susp(req, m);
                break;
 
        case NEG_ACKED:
@@ -654,13 +676,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                        goto goto_read_retry_local;
 
                maybe_wakeup_conflicting_requests(req);
-               _req_may_be_done_not_susp(req, m);
+               req_may_be_completed_not_susp(req, m);
                /* else: done by HANDED_OVER_TO_NETWORK */
                break;
 
        goto_read_retry_local:
                if (!drbd_may_do_local_read(mdev, req->i.sector, req->i.size)) {
-                       _req_may_be_done_not_susp(req, m);
+                       req_may_be_completed_not_susp(req, m);
                        break;
                }
                D_ASSERT(!(req->rq_state & RQ_LOCAL_PENDING));
@@ -675,7 +697,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                if (!(req->rq_state & RQ_LOCAL_COMPLETED))
                        break;
 
-               _req_may_be_done(req, m); /* Allowed while state.susp */
+               req_may_be_completed(req, m); /* Allowed while state.susp */
                break;
 
        case RESTART_FROZEN_DISK_IO:
@@ -696,8 +718,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
        case RESEND:
                /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
                   before the connection loss (B&C only); only P_BARRIER_ACK was missing.
-                  Trowing them out of the TL here by pretending we got a BARRIER_ACK
-                  We ensure that the peer was not rebooted */
+                  Throwing them out of the TL here by pretending we got a BARRIER_ACK.
+                  During connection handshake, we ensure that the peer was not rebooted. */
                if (!(req->rq_state & RQ_NET_OK)) {
                        if (req->w.cb) {
                                drbd_queue_work(&mdev->tconn->data.work, &req->w);
@@ -723,7 +745,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                        if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)))
                                atomic_sub(req->i.size>>9, &mdev->ap_in_flight);
                }
-               _req_may_be_done(req, m); /* Allowed while state.susp */
+               req_may_be_done(req); /* Allowed while state.susp */
                break;
 
        case DATA_RECEIVED:
@@ -731,7 +753,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                dec_ap_pending(mdev);
                req->rq_state &= ~RQ_NET_PENDING;
                req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
-               _req_may_be_done_not_susp(req, m);
+               req_may_be_completed_not_susp(req, m);
                break;
        };