/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side XDR for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"

#define OPENOWNER_POOL_SIZE     8

const nfs4_stateid zero_stateid;

static LIST_HEAD(nfs4_clientid_list);

static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
{
        int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
                        nfs_callback_tcpport, cred);
        if (status == 0)
                status = nfs4_proc_setclientid_confirm(clp, cred);
        if (status == 0)
                nfs4_schedule_state_renewal(clp);
        return status;
}

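/*
 * Return a referenced credential for lease renewal: pick the cred of
 * the first state owner that still holds open state, or NULL if no
 * files are open under this client.
 */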
struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct rpc_cred *cred = NULL;

        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                if (list_empty(&sp->so_states))
                        continue;
                cred = get_rpccred(sp->so_cred);
                break;
        }
        return cred;
}

static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;

        pos = rb_first(&clp->cl_state_owners);
        if (pos != NULL) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                return get_rpccred(sp->so_cred);
        }
        return NULL;
}

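/*
 * Allocate a unique 64-bit id, at most maxbits wide and no smaller than
 * minval, and insert it into the given rb-tree.  Start from a random
 * value to flatten the distribution, then on a collision probe
 * successive ids by walking the tree in order, wrapping back to minval
 * when the id space overflows.
 */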
static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
                __u64 minval, int maxbits)
{
        struct rb_node **p, *parent;
        struct nfs_unique_id *pos;
        __u64 mask = ~0ULL;

        if (maxbits < 64)
                mask = (1ULL << maxbits) - 1ULL;

        /* Ensure distribution is more or less flat */
        get_random_bytes(&new->id, sizeof(new->id));
        new->id &= mask;
        if (new->id < minval)
                new->id += minval;
retry:
        p = &root->rb_node;
        parent = NULL;

        while (*p != NULL) {
                parent = *p;
                pos = rb_entry(parent, struct nfs_unique_id, rb_node);

                if (new->id < pos->id)
                        p = &(*p)->rb_left;
                else if (new->id > pos->id)
                        p = &(*p)->rb_right;
                else
                        goto id_exists;
        }
        rb_link_node(&new->rb_node, parent, p);
        rb_insert_color(&new->rb_node, root);
        return;
id_exists:
        for (;;) {
                new->id++;
                if (new->id < minval || (new->id & mask) != new->id) {
                        new->id = minval;
                        break;
                }
                parent = rb_next(parent);
                if (parent == NULL)
                        break;
                pos = rb_entry(parent, struct nfs_unique_id, rb_node);
                if (new->id < pos->id)
                        break;
        }
        goto retry;
}

static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
{
        rb_erase(&id->rb_node, root);
}

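/*
 * Look up a state owner in clp->cl_state_owners, an rb-tree keyed on
 * the rpc_cred pointer.  Returns a referenced state owner on a match,
 * or NULL.  Called with clp->cl_lock held.
 */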
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs_client *clp, struct rpc_cred *cred)
{
        struct rb_node **p = &clp->cl_state_owners.rb_node,
                       *parent = NULL;
        struct nfs4_state_owner *sp, *res = NULL;

        while (*p != NULL) {
                parent = *p;
                sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

                if (cred < sp->so_cred)
                        p = &parent->rb_left;
                else if (cred > sp->so_cred)
                        p = &parent->rb_right;
                else {
                        atomic_inc(&sp->so_count);
                        res = sp;
                        break;
                }
        }
        return res;
}

static struct nfs4_state_owner *
nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
{
        struct rb_node **p = &clp->cl_state_owners.rb_node,
                       *parent = NULL;
        struct nfs4_state_owner *sp;

        while (*p != NULL) {
                parent = *p;
                sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

                if (new->so_cred < sp->so_cred)
                        p = &parent->rb_left;
                else if (new->so_cred > sp->so_cred)
                        p = &parent->rb_right;
                else {
                        atomic_inc(&sp->so_count);
                        return sp;
                }
        }
        nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
        rb_link_node(&new->so_client_node, parent, p);
        rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
        return new;
}

static void
nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
{
        if (!RB_EMPTY_NODE(&sp->so_client_node))
                rb_erase(&sp->so_client_node, &clp->cl_state_owners);
        nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
        struct nfs4_state_owner *sp;

        sp = kzalloc(sizeof(*sp), GFP_KERNEL);
        if (!sp)
                return NULL;
        spin_lock_init(&sp->so_lock);
        INIT_LIST_HEAD(&sp->so_states);
        INIT_LIST_HEAD(&sp->so_delegations);
        rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
        sp->so_seqid.sequence = &sp->so_sequence;
        spin_lock_init(&sp->so_sequence.lock);
        INIT_LIST_HEAD(&sp->so_sequence.list);
        atomic_set(&sp->so_count, 1);
        return sp;
}

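/*
 * Unhash the state owner from the client's rb-tree.  A subsequent
 * nfs4_get_state_owner() for the same credential will then allocate a
 * fresh owner (and hence a fresh open_owner sequence id); this is used
 * to recover from NFS4ERR_BAD_SEQID.
 */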
void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
        if (!RB_EMPTY_NODE(&sp->so_client_node)) {
                struct nfs_client *clp = sp->so_client;

                spin_lock(&clp->cl_lock);
                rb_erase(&sp->so_client_node, &clp->cl_state_owners);
                RB_CLEAR_NODE(&sp->so_client_node);
                spin_unlock(&clp->cl_lock);
        }
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 *       with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
        struct nfs_client *clp = server->nfs_client;
        struct nfs4_state_owner *sp, *new;

        spin_lock(&clp->cl_lock);
        sp = nfs4_find_state_owner(clp, cred);
        spin_unlock(&clp->cl_lock);
        if (sp != NULL)
                return sp;
        new = nfs4_alloc_state_owner();
        if (new == NULL)
                return NULL;
        new->so_client = clp;
        new->so_cred = cred;
        spin_lock(&clp->cl_lock);
        sp = nfs4_insert_state_owner(clp, new);
        spin_unlock(&clp->cl_lock);
        if (sp == new)
                get_rpccred(cred);
        else
                kfree(new);
        return sp;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs_client *clp = sp->so_client;
        struct rpc_cred *cred = sp->so_cred;

        if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
                return;
        nfs4_remove_state_owner(clp, sp);
        spin_unlock(&clp->cl_lock);
        put_rpccred(cred);
        kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
        struct nfs4_state *state;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;
        atomic_set(&state->count, 1);
        INIT_LIST_HEAD(&state->lock_states);
        spin_lock_init(&state->state_lock);
        return state;
}

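/*
 * Update state->state to reflect the share modes currently held and
 * reorder the owner's so_states list: writeable states are kept at the
 * head (see the reclaim code for why).  Callers hold the appropriate
 * locks; within this file that is owner->so_lock and inode->i_lock.
 */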
void
nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
{
        if (state->state == mode)
                return;
        /* NB! List reordering - see the reclaim code for why.  */
        if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
                if (mode & FMODE_WRITE)
                        list_move(&state->open_states, &state->owner->so_states);
                else
                        list_move_tail(&state->open_states, &state->owner->so_states);
        }
        if (mode == 0)
                list_del_init(&state->inode_states);
        state->state = mode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_state *state;

        list_for_each_entry(state, &nfsi->open_states, inode_states) {
                if (state->owner != owner)
                        continue;
                if (atomic_inc_not_zero(&state->count))
                        return state;
        }
        return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
        kfree(state);
}

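/*
 * Find or create the nfs4_state for this (inode, owner) pair.  Note the
 * optimistic locking pattern: look up without so_lock first, then
 * allocate outside the locks and repeat the search under so_lock and
 * i_lock before inserting, freeing the preallocated state if we lost
 * the race.
 */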
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs4_state *state, *new;
        struct nfs_inode *nfsi = NFS_I(inode);

        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        spin_unlock(&inode->i_lock);
        if (state)
                goto out;
        new = nfs4_alloc_open_state();
        spin_lock(&owner->so_lock);
        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        if (state == NULL && new != NULL) {
                state = new;
                state->owner = owner;
                atomic_inc(&owner->so_count);
                list_add(&state->inode_states, &nfsi->open_states);
                state->inode = igrab(inode);
                spin_unlock(&inode->i_lock);
                /* Note: The reclaim code dictates that we add stateless
                 * and read-only stateids to the end of the list */
                list_add_tail(&state->open_states, &owner->so_states);
                spin_unlock(&owner->so_lock);
        } else {
                spin_unlock(&inode->i_lock);
                spin_unlock(&owner->so_lock);
                if (new)
                        nfs4_free_open_state(new);
        }
out:
        return state;
}

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;

        if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
                return;
        spin_lock(&inode->i_lock);
        if (!list_empty(&state->inode_states))
                list_del(&state->inode_states);
        list_del(&state->open_states);
        spin_unlock(&inode->i_lock);
        spin_unlock(&owner->so_lock);
        iput(inode);
        nfs4_free_open_state(state);
        nfs4_put_state_owner(owner);
}

/*
 * Close the current file: drop one reference of the given open mode.
 * If the remaining opens no longer require the state held on the
 * server, call out to nfs4_do_close(); otherwise just release the
 * local references.
 */
void nfs4_close_state(struct path *path, struct nfs4_state *state, mode_t mode)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;
        int call_close = 0;
        int newstate;

        atomic_inc(&owner->so_count);
        /* Protect against nfs4_find_state() */
        spin_lock(&owner->so_lock);
        spin_lock(&inode->i_lock);
        switch (mode & (FMODE_READ | FMODE_WRITE)) {
                case FMODE_READ:
                        state->n_rdonly--;
                        break;
                case FMODE_WRITE:
                        state->n_wronly--;
                        break;
                case FMODE_READ|FMODE_WRITE:
                        state->n_rdwr--;
        }
        newstate = FMODE_READ|FMODE_WRITE;
        if (state->n_rdwr == 0) {
                if (state->n_rdonly == 0) {
                        newstate &= ~FMODE_READ;
                        call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
                        call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
                }
                if (state->n_wronly == 0) {
                        newstate &= ~FMODE_WRITE;
                        call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
                        call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
                }
                if (newstate == 0)
                        clear_bit(NFS_DELEGATED_STATE, &state->flags);
        }
        nfs4_state_set_mode_locked(state, newstate);
        spin_unlock(&inode->i_lock);
        spin_unlock(&owner->so_lock);

        if (!call_close) {
                nfs4_put_open_state(state);
                nfs4_put_state_owner(owner);
        } else
                nfs4_do_close(path, state);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *pos;
        list_for_each_entry(pos, &state->lock_states, ls_locks) {
                if (pos->ls_owner != fl_owner)
                        continue;
                atomic_inc(&pos->ls_count);
                return pos;
        }
        return NULL;
}

/*
 * Allocate and initialise a new lock_state structure for the given
 * open state and lock owner.  The new structure is not yet linked
 * into state->lock_states; that is left to the caller.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;
        struct nfs_client *clp = state->owner->so_client;

        lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
        if (lsp == NULL)
                return NULL;
        lsp->ls_seqid.sequence = &state->owner->so_sequence;
        atomic_set(&lsp->ls_count, 1);
        lsp->ls_owner = fl_owner;
        /* Set ls_state here so that nfs4_free_lock_state() is safe even
         * if this lock_state is never linked into state->lock_states */
        lsp->ls_state = state;
        spin_lock(&clp->cl_lock);
        nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
        spin_unlock(&clp->cl_lock);
        INIT_LIST_HEAD(&lsp->ls_locks);
        return lsp;
}

static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
{
        struct nfs_client *clp = lsp->ls_state->owner->so_client;

        spin_lock(&clp->cl_lock);
        nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
        spin_unlock(&clp->cl_lock);
        kfree(lsp);
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
        struct nfs4_lock_state *lsp, *new = NULL;

        for (;;) {
                spin_lock(&state->state_lock);
                lsp = __nfs4_find_lock_state(state, owner);
                if (lsp != NULL)
                        break;
                if (new != NULL) {
                        new->ls_state = state;
                        list_add(&new->ls_locks, &state->lock_states);
                        set_bit(LK_STATE_IN_USE, &state->flags);
                        lsp = new;
                        new = NULL;
                        break;
                }
                spin_unlock(&state->state_lock);
                new = nfs4_alloc_lock_state(state, owner);
                if (new == NULL)
                        return NULL;
        }
        spin_unlock(&state->state_lock);
        if (new != NULL)
                nfs4_free_lock_state(new);
        return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
        struct nfs4_state *state;

        if (lsp == NULL)
                return;
        state = lsp->ls_state;
        if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
                return;
        list_del(&lsp->ls_locks);
        if (list_empty(&state->lock_states))
                clear_bit(LK_STATE_IN_USE, &state->flags);
        spin_unlock(&state->state_lock);
        nfs4_free_lock_state(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
        struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

        dst->fl_u.nfs4_fl.owner = lsp;
        atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
        nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
        .fl_copy_lock = nfs4_fl_copy_lock,
        .fl_release_private = nfs4_fl_release_lock,
};

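/*
 * Attach our lock state to a struct file_lock, so that the VFS lock
 * code copies and releases the nfs4_lock_state reference along with
 * the lock itself (see nfs4_fl_lock_ops above).
 */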
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
        struct nfs4_lock_state *lsp;

        if (fl->fl_ops != NULL)
                return 0;
        lsp = nfs4_get_lock_state(state, fl->fl_owner);
        if (lsp == NULL)
                return -ENOMEM;
        fl->fl_u.nfs4_fl.owner = lsp;
        fl->fl_ops = &nfs4_fl_lock_ops;
        return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;

        memcpy(dst, &state->stateid, sizeof(*dst));
        if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
                return;

        spin_lock(&state->state_lock);
        lsp = __nfs4_find_lock_state(state, fl_owner);
        if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
                memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
        spin_unlock(&state->state_lock);
        nfs4_put_lock_state(lsp);
}

struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
        struct rpc_sequence *sequence = counter->sequence;
        struct nfs_seqid *new;

        new = kmalloc(sizeof(*new), GFP_KERNEL);
        if (new != NULL) {
                new->sequence = counter;
                spin_lock(&sequence->lock);
                list_add_tail(&new->list, &sequence->list);
                spin_unlock(&sequence->lock);
        }
        return new;
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
        struct rpc_sequence *sequence = seqid->sequence->sequence;

        spin_lock(&sequence->lock);
        list_del(&seqid->list);
        spin_unlock(&sequence->lock);
        rpc_wake_up(&sequence->wait);
        kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
        switch (status) {
                case 0:
                        break;
                case -NFS4ERR_BAD_SEQID:
                        if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
                                return;
                        printk(KERN_WARNING "NFS: v4 server returned a bad"
                                        " sequence-id error on an"
                                        " unconfirmed sequence %p!\n",
                                        seqid->sequence);
                        /* fall through */
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_STALE_STATEID:
                case -NFS4ERR_BAD_STATEID:
                case -NFS4ERR_BADXDR:
                case -NFS4ERR_RESOURCE:
                case -NFS4ERR_NOFILEHANDLE:
                        /* Non-seqid mutating errors */
                        return;
        }
        /*
         * Note: no locking needed as we are guaranteed to be first
         * on the sequence list
         */
        seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
        if (status == -NFS4ERR_BAD_SEQID) {
                struct nfs4_state_owner *sp = container_of(seqid->sequence,
                                struct nfs4_state_owner, so_seqid);
                nfs4_drop_state_owner(sp);
        }
        nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
        nfs_increment_seqid(status, seqid);
}

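/*
 * Seqid-mutating operations must be strictly serialized per state
 * owner: if our seqid is not yet at the head of the sequence list,
 * put the task to sleep on the sequence wait queue.  nfs_free_seqid()
 * wakes the queue when the head of the list completes.
 */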
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
        struct rpc_sequence *sequence = seqid->sequence->sequence;
        int status = 0;

        if (sequence->list.next == &seqid->list)
                goto out;
        spin_lock(&sequence->lock);
        if (sequence->list.next != &seqid->list) {
                rpc_sleep_on(&sequence->wait, task, NULL, NULL);
                status = -EAGAIN;
        }
        spin_unlock(&sequence->lock);
out:
        return status;
}

static int reclaimer(void *);

static inline void nfs4_clear_recover_bit(struct nfs_client *clp)
{
        smp_mb__before_clear_bit();
        clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
        smp_mb__after_clear_bit();
        wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
        rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * State recovery routine
 */
static void nfs4_recover_state(struct nfs_client *clp)
{
        struct task_struct *task;

        __module_get(THIS_MODULE);
        atomic_inc(&clp->cl_count);
        task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
                        NIPQUAD(clp->cl_addr.sin_addr));
        if (!IS_ERR(task))
                return;
        nfs4_clear_recover_bit(clp);
        nfs_put_client(clp);
        module_put(THIS_MODULE);
}

/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs_client *clp)
{
        if (!clp)
                return;
        if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
                nfs4_recover_state(clp);
}

static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;
        int status = 0;

        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
                        continue;
                if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
                        continue;
                status = ops->recover_lock(state, fl);
                if (status >= 0)
                        continue;
                switch (status) {
                        default:
                                printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                                __FUNCTION__, status);
                        case -NFS4ERR_EXPIRED:
                        case -NFS4ERR_NO_GRACE:
                        case -NFS4ERR_RECLAIM_BAD:
                        case -NFS4ERR_RECLAIM_CONFLICT:
                                /* kill_proc(fl->fl_pid, SIGLOST, 1); */
                                break;
                        case -NFS4ERR_STALE_CLIENTID:
                                goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;
        int status = 0;

        /* Note: we rely on the sp->so_states list being ordered
         * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
         * states first.
         * This is needed to ensure that the server won't give us any
         * read delegations that we have to return if, say, we are
         * recovering after a network partition or a reboot from a
         * server that doesn't support a grace period.
         */
        list_for_each_entry(state, &sp->so_states, open_states) {
                if (state->state == 0)
                        continue;
                status = ops->recover_open(sp, state);
                if (status >= 0) {
                        status = nfs4_reclaim_locks(ops, state);
                        if (status < 0)
                                goto out_err;
                        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                                        printk(KERN_WARNING "%s: Lock reclaim failed!\n",
                                                        __FUNCTION__);
                        }
                        continue;
                }
                switch (status) {
                        default:
                                printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                                __FUNCTION__, status);
                        case -ENOENT:
                        case -NFS4ERR_RECLAIM_BAD:
                        case -NFS4ERR_RECLAIM_CONFLICT:
                                /*
                                 * Open state on this file cannot be recovered
                                 * All we can do is revert to using the zero stateid.
                                 */
                                memset(state->stateid.data, 0,
                                        sizeof(state->stateid.data));
                                /* Mark the file as being 'closed' */
                                state->state = 0;
                                break;
                        case -NFS4ERR_EXPIRED:
                        case -NFS4ERR_NO_GRACE:
                        case -NFS4ERR_STALE_CLIENTID:
                                goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

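/*
 * Prepare all state for reclaim: reset every open owner and lock owner
 * sequence id to zero and clear the per-state open/lock flags, so that
 * each stateid is re-established with the server from scratch.
 */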
static void nfs4_state_mark_reclaim(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;

        /* Reset all sequence ids to zero */
        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                sp->so_seqid.counter = 0;
                sp->so_seqid.flags = 0;
                spin_lock(&sp->so_lock);
                list_for_each_entry(state, &sp->so_states, open_states) {
                        clear_bit(NFS_DELEGATED_STATE, &state->flags);
                        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
                        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
                        clear_bit(NFS_O_RDWR_STATE, &state->flags);
                        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                lock->ls_seqid.counter = 0;
                                lock->ls_seqid.flags = 0;
                                lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
                        }
                }
                spin_unlock(&sp->so_lock);
        }
}

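/*
 * The state recovery thread.  Try to renew the existing lease first;
 * if the lease is stale (server reboot) or there is no open state left
 * to renew, establish a new clientid, then walk every state owner and
 * reclaim its open and lock state, falling back from reboot recovery
 * to network-partition recovery once the grace period has expired.
 */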
static int reclaimer(void *ptr)
{
        struct nfs_client *clp = ptr;
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct nfs4_state_recovery_ops *ops;
        struct rpc_cred *cred;
        int status = 0;

        allow_signal(SIGKILL);

        /* Ensure exclusive access to NFSv4 state */
        lock_kernel();
        down_write(&clp->cl_sem);
        /* Are there any NFS mounts out there? */
        if (list_empty(&clp->cl_superblocks))
                goto out;
restart_loop:
        ops = &nfs4_network_partition_recovery_ops;
        /* Are there any open files on this volume? */
        cred = nfs4_get_renew_cred(clp);
        if (cred != NULL) {
                /* Yes there are: try to renew the old lease */
                status = nfs4_proc_renew(clp, cred);
                switch (status) {
                        case 0:
                        case -NFS4ERR_CB_PATH_DOWN:
                                put_rpccred(cred);
                                goto out;
                        case -NFS4ERR_STALE_CLIENTID:
                        case -NFS4ERR_LEASE_MOVED:
                                ops = &nfs4_reboot_recovery_ops;
                }
        } else {
                /* "reboot" to ensure we clear all state on the server */
                clp->cl_boot_time = CURRENT_TIME;
                cred = nfs4_get_setclientid_cred(clp);
        }
        /* We're going to have to re-establish a clientid */
        nfs4_state_mark_reclaim(clp);
        status = -ENOENT;
        if (cred != NULL) {
                status = nfs4_init_client(clp, cred);
                put_rpccred(cred);
        }
        if (status)
                goto out_error;
        /* Mark all delegations for reclaim */
        nfs_delegation_mark_reclaim(clp);
        /* Note: list is protected by exclusive lock on clp->cl_sem */
        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                status = nfs4_reclaim_open_state(ops, sp);
                if (status < 0) {
                        if (status == -NFS4ERR_NO_GRACE) {
                                ops = &nfs4_network_partition_recovery_ops;
                                status = nfs4_reclaim_open_state(ops, sp);
                        }
                        if (status == -NFS4ERR_STALE_CLIENTID)
                                goto restart_loop;
                        if (status == -NFS4ERR_EXPIRED)
                                goto restart_loop;
                }
        }
        nfs_delegation_reap_unclaimed(clp);
out:
        up_write(&clp->cl_sem);
        unlock_kernel();
        if (status == -NFS4ERR_CB_PATH_DOWN)
                nfs_handle_cb_pathdown(clp);
        nfs4_clear_recover_bit(clp);
        nfs_put_client(clp);
        module_put_and_exit(0);
        return 0;
out_error:
        printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
                                NIPQUAD(clp->cl_addr.sin_addr), -status);
        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
        goto out;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */