Pileus Git - ~andy/linux/blobdiff - fs/nfs/nfs4state.c
NFS: Fix a typo in _nfs_display_fhandle
[~andy/linux] / fs / nfs / nfs4state.c
index a42e60d3ee50a4a0ad56ce839ab36b8525b3f0a8..2f760604246f7cdca798867e32cedd84da0a5889 100644 (file)
@@ -190,30 +190,29 @@ static int nfs41_setup_state_renewal(struct nfs_client *clp)
 static void nfs4_end_drain_session(struct nfs_client *clp)
 {
        struct nfs4_session *ses = clp->cl_session;
+       struct nfs4_slot_table *tbl;
        int max_slots;
 
        if (ses == NULL)
                return;
+       tbl = &ses->fc_slot_table;
        if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
-               spin_lock(&ses->fc_slot_table.slot_tbl_lock);
-               max_slots = ses->fc_slot_table.max_slots;
+               spin_lock(&tbl->slot_tbl_lock);
+               max_slots = tbl->max_slots;
                while (max_slots--) {
-                       struct rpc_task *task;
-
-                       task = rpc_wake_up_next(&ses->fc_slot_table.
-                                               slot_tbl_waitq);
-                       if (!task)
+                       if (rpc_wake_up_first(&tbl->slot_tbl_waitq,
+                                               nfs4_set_task_privileged,
+                                               NULL) == NULL)
                                break;
-                       rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
                }
-               spin_unlock(&ses->fc_slot_table.slot_tbl_lock);
+               spin_unlock(&tbl->slot_tbl_lock);
        }
 }
 
 static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
 {
        spin_lock(&tbl->slot_tbl_lock);
-       if (tbl->highest_used_slotid != -1) {
+       if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
                INIT_COMPLETION(tbl->complete);
                spin_unlock(&tbl->slot_tbl_lock);
                return wait_for_completion_interruptible(&tbl->complete);
@@ -917,20 +916,28 @@ struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_m
        if (new != NULL) {
                new->sequence = counter;
                INIT_LIST_HEAD(&new->list);
+               new->task = NULL;
        }
        return new;
 }
 
 void nfs_release_seqid(struct nfs_seqid *seqid)
 {
-       if (!list_empty(&seqid->list)) {
-               struct nfs_seqid_counter *sequence = seqid->sequence;
+       struct nfs_seqid_counter *sequence;
+
+       if (list_empty(&seqid->list))
+               return;
+       sequence = seqid->sequence;
+       spin_lock(&sequence->lock);
+       list_del_init(&seqid->list);
+       if (!list_empty(&sequence->list)) {
+               struct nfs_seqid *next;
 
-               spin_lock(&sequence->lock);
-               list_del_init(&seqid->list);
-               spin_unlock(&sequence->lock);
-               rpc_wake_up(&sequence->wait);
+               next = list_first_entry(&sequence->list,
+                               struct nfs_seqid, list);
+               rpc_wake_up_queued_task(&sequence->wait, next->task);
        }
+       spin_unlock(&sequence->lock);
 }
 
 void nfs_free_seqid(struct nfs_seqid *seqid)
@@ -1001,6 +1008,7 @@ int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
        int status = 0;
 
        spin_lock(&sequence->lock);
+       seqid->task = task;
        if (list_empty(&seqid->list))
                list_add_tail(&seqid->list, &sequence->list);
        if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
@@ -1029,19 +1037,28 @@ static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
 void nfs4_schedule_state_manager(struct nfs_client *clp)
 {
        struct task_struct *task;
+       char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
 
        if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
                return;
        __module_get(THIS_MODULE);
        atomic_inc(&clp->cl_count);
-       task = kthread_run(nfs4_run_state_manager, clp, "%s-manager",
-                               rpc_peeraddr2str(clp->cl_rpcclient,
-                                                       RPC_DISPLAY_ADDR));
-       if (!IS_ERR(task))
-               return;
-       nfs4_clear_state_manager_bit(clp);
-       nfs_put_client(clp);
-       module_put(THIS_MODULE);
+
+       /* The rcu_read_lock() is not strictly necessary, as the state
+        * manager is the only thread that ever changes the rpc_xprt
+        * after it's initialized.  At this point, we're single threaded. */
+       rcu_read_lock();
+       snprintf(buf, sizeof(buf), "%s-manager",
+                       rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+       rcu_read_unlock();
+       task = kthread_run(nfs4_run_state_manager, clp, buf);
+       if (IS_ERR(task)) {
+               printk(KERN_ERR "%s: kthread_run: %ld\n",
+                       __func__, PTR_ERR(task));
+               nfs4_clear_state_manager_bit(clp);
+               nfs_put_client(clp);
+               module_put(THIS_MODULE);
+       }
 }
 
 /*
@@ -1089,6 +1106,8 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4
 {
        struct nfs_client *clp = server->nfs_client;
 
+       if (test_and_clear_bit(NFS_DELEGATED_STATE, &state->flags))
+               nfs_async_inode_return_delegation(state->inode, &state->stateid);
        nfs4_state_mark_reclaim_nograce(clp, state);
        nfs4_schedule_state_manager(clp);
 }
@@ -1130,8 +1149,8 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
                        case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                                goto out;
                        default:
-                               printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
-                                               __func__, status);
+                               printk(KERN_ERR "NFS: %s: unhandled error %d. "
+                                       "Zeroing state\n", __func__, status);
                        case -ENOMEM:
                        case -NFS4ERR_DENIED:
                        case -NFS4ERR_RECLAIM_BAD:
@@ -1177,8 +1196,8 @@ restart:
                                spin_lock(&state->state_lock);
                                list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                        if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
-                                               printk("%s: Lock reclaim failed!\n",
-                                                       __func__);
+                                               printk("NFS: %s: Lock reclaim "
+                                                       "failed!\n", __func__);
                                }
                                spin_unlock(&state->state_lock);
                                nfs4_put_open_state(state);
@@ -1187,8 +1206,8 @@ restart:
                }
                switch (status) {
                        default:
-                               printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
-                                               __func__, status);
+                               printk(KERN_ERR "NFS: %s: unhandled error %d. "
+                                       "Zeroing state\n", __func__, status);
                        case -ENOENT:
                        case -ENOMEM:
                        case -ESTALE:
@@ -1756,7 +1775,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
        } while (atomic_read(&clp->cl_count) > 1);
        return;
 out_error:
-       printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
+       printk(KERN_WARNING "NFS: state manager failed on NFSv4 server %s"
                        " with error %d\n", clp->cl_hostname, -status);
        nfs4_end_drain_session(clp);
        nfs4_clear_state_manager_bit(clp);