/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Extent Lock.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <linux/list.h>
#include <cl_object.h>
#include "cl_internal.h"

/** Lock class of cl_lock::cll_guard */
static struct lock_class_key cl_lock_guard_class;
static struct kmem_cache *cl_lock_kmem;

static struct lu_kmem_descr cl_lock_caches[] = {
        {
                .ckd_cache = &cl_lock_kmem,
                .ckd_name  = "cl_lock_kmem",
                .ckd_size  = sizeof(struct cl_lock)
        },
        {
                .ckd_cache = NULL
        }
};

#define CS_LOCK_INC(o, item)
#define CS_LOCK_DEC(o, item)
#define CS_LOCKSTATE_INC(o, state)
#define CS_LOCKSTATE_DEC(o, state)

/**
 * Basic lock invariant that is maintained at all times. Caller either has a
 * reference to \a lock, or somehow assures that \a lock cannot be freed.
 *
 * \see cl_lock_invariant()
 */
static int cl_lock_invariant_trusted(const struct lu_env *env,
                                     const struct cl_lock *lock)
{
        return  ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
                atomic_read(&lock->cll_ref) >= lock->cll_holds &&
                lock->cll_holds >= lock->cll_users &&
                lock->cll_holds >= 0 &&
                lock->cll_users >= 0 &&
                lock->cll_depth >= 0;
}

/**
 * Stronger lock invariant, checking that caller has a reference on a lock.
 *
 * \see cl_lock_invariant_trusted()
 */
static int cl_lock_invariant(const struct lu_env *env,
                             const struct cl_lock *lock)
{
        int result;

        result = atomic_read(&lock->cll_ref) > 0 &&
                cl_lock_invariant_trusted(env, lock);
        if (!result && env != NULL)
                CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
        return result;
}

103
104 /**
105  * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
106  */
107 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
108 {
109         return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
110 }
111
112 /**
113  * Returns a set of counters for this lock, depending on a lock nesting.
114  */
115 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
116                                                    const struct cl_lock *lock)
117 {
118         struct cl_thread_info *info;
119         enum clt_nesting_level nesting;
120
121         info = cl_env_info(env);
122         nesting = cl_lock_nesting(lock);
123         LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
124         return &info->clt_counters[nesting];
125 }
126
127 static void cl_lock_trace0(int level, const struct lu_env *env,
128                            const char *prefix, const struct cl_lock *lock,
129                            const char *func, const int line)
130 {
131         struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
132         CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
133                       "(%p/%d/%d) at %s():%d\n",
134                prefix, lock, atomic_read(&lock->cll_ref),
135                lock->cll_guarder, lock->cll_depth,
136                lock->cll_state, lock->cll_error, lock->cll_holds,
137                lock->cll_users, lock->cll_flags,
138                env, h->coh_nesting, cl_lock_nr_mutexed(env),
139                func, line);
140 }
141 #define cl_lock_trace(level, env, prefix, lock)                  \
142         cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
143
144 #define RETIP ((unsigned long)__builtin_return_address(0))
145
#ifdef CONFIG_LOCKDEP
static struct lock_class_key cl_lock_key;

static void cl_lock_lockdep_init(struct cl_lock *lock)
{
        lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
}

static void cl_lock_lockdep_acquire(const struct lu_env *env,
                                    struct cl_lock *lock, __u32 enqflags)
{
        cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
        lock_map_acquire(&lock->dep_map);
}

static void cl_lock_lockdep_release(const struct lu_env *env,
                                    struct cl_lock *lock)
{
        cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
        lock_release(&lock->dep_map, 0, RETIP);
}

#else /* !CONFIG_LOCKDEP */

static void cl_lock_lockdep_init(struct cl_lock *lock)
{}
static void cl_lock_lockdep_acquire(const struct lu_env *env,
                                    struct cl_lock *lock, __u32 enqflags)
{}
static void cl_lock_lockdep_release(const struct lu_env *env,
                                    struct cl_lock *lock)
{}

#endif /* !CONFIG_LOCKDEP */

/**
 * Adds lock slice to the compound lock.
 *
 * This is called by cl_object_operations::coo_lock_init() methods to add a
 * per-layer state to the lock. New state is added at the end of
 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
 */
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
                       struct cl_object *obj,
                       const struct cl_lock_operations *ops)
{
        slice->cls_lock = lock;
        list_add_tail(&slice->cls_linkage, &lock->cll_layers);
        slice->cls_obj = obj;
        slice->cls_ops = ops;
}
EXPORT_SYMBOL(cl_lock_slice_add);
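
/*
 * Usage sketch (illustrative, not part of the original file): a layer's
 * cl_object_operations::coo_lock_init() method typically embeds a
 * cl_lock_slice in its own per-layer lock structure and registers it with
 * cl_lock_slice_add(). The names "my_lock", "mlk_cl" and "my_lock_ops"
 * below are hypothetical.
 *
 *      static int my_object_lock_init(const struct lu_env *env,
 *                                     struct cl_object *obj,
 *                                     struct cl_lock *lock,
 *                                     const struct cl_io *io)
 *      {
 *              struct my_lock *mlk;
 *
 *              OBD_ALLOC_PTR(mlk);
 *              if (mlk == NULL)
 *                      return -ENOMEM;
 *              cl_lock_slice_add(lock, &mlk->mlk_cl, obj, &my_lock_ops);
 *              return 0;
 *      }
 */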

/**
 * Returns true iff a lock with the mode \a has provides at least the same
 * guarantees as a lock with the mode \a need.
 */
int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
{
        LINVRNT(need == CLM_READ || need == CLM_WRITE ||
                need == CLM_PHANTOM || need == CLM_GROUP);
        LINVRNT(has == CLM_READ || has == CLM_WRITE ||
                has == CLM_PHANTOM || has == CLM_GROUP);
        CLASSERT(CLM_PHANTOM < CLM_READ);
        CLASSERT(CLM_READ < CLM_WRITE);
        CLASSERT(CLM_WRITE < CLM_GROUP);

        if (has != CLM_GROUP)
                return need <= has;
        else
                return need == has;
}
EXPORT_SYMBOL(cl_lock_mode_match);
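
/*
 * Illustrative examples (not part of the original file) of the resulting
 * compatibility rules: modes are ordered PHANTOM < READ < WRITE, so a
 * stronger non-group mode satisfies a weaker request, while CLM_GROUP
 * only matches itself.
 *
 *      cl_lock_mode_match(CLM_WRITE, CLM_READ);   returns 1
 *      cl_lock_mode_match(CLM_READ, CLM_WRITE);   returns 0
 *      cl_lock_mode_match(CLM_GROUP, CLM_READ);   returns 0
 *      cl_lock_mode_match(CLM_GROUP, CLM_GROUP);  returns 1
 */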

/**
 * Returns true iff extent portions of lock descriptions match.
 */
int cl_lock_ext_match(const struct cl_lock_descr *has,
                      const struct cl_lock_descr *need)
{
        return
                has->cld_start <= need->cld_start &&
                has->cld_end >= need->cld_end &&
                cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
                (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
}
EXPORT_SYMBOL(cl_lock_ext_match);

/**
 * Returns true iff a lock with the description \a has provides at least the
 * same guarantees as a lock with the description \a need.
 */
int cl_lock_descr_match(const struct cl_lock_descr *has,
                        const struct cl_lock_descr *need)
{
        return
                cl_object_same(has->cld_obj, need->cld_obj) &&
                cl_lock_ext_match(has, need);
}
EXPORT_SYMBOL(cl_lock_descr_match);
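
/*
 * Usage sketch (illustrative): checking whether a cached lock covers a new
 * request. The fields below are exactly the ones compared above; "obj" is
 * a hypothetical cl_object and the extents are page indices.
 *
 *      struct cl_lock_descr has = {
 *              .cld_obj   = obj,
 *              .cld_start = 0,
 *              .cld_end   = 100,
 *              .cld_mode  = CLM_WRITE
 *      };
 *      struct cl_lock_descr need = {
 *              .cld_obj   = obj,
 *              .cld_start = 10,
 *              .cld_end   = 20,
 *              .cld_mode  = CLM_READ
 *      };
 *
 *      cl_lock_descr_match(&has, &need) returns 1: same object, wider
 *      extent, stronger mode.
 */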

static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_object *obj = lock->cll_descr.cld_obj;

        LINVRNT(!cl_lock_is_mutexed(lock));

        cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
        might_sleep();
        while (!list_empty(&lock->cll_layers)) {
                struct cl_lock_slice *slice;

                slice = list_entry(lock->cll_layers.next,
                                   struct cl_lock_slice, cls_linkage);
                list_del_init(lock->cll_layers.next);
                slice->cls_ops->clo_fini(env, slice);
        }
        CS_LOCK_DEC(obj, total);
        CS_LOCKSTATE_DEC(obj, lock->cll_state);
        lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
        cl_object_put(env, obj);
        lu_ref_fini(&lock->cll_reference);
        lu_ref_fini(&lock->cll_holders);
        mutex_destroy(&lock->cll_guard);
        OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
}

/**
 * Releases a reference on a lock.
 *
 * When last reference is released, lock is returned to the cache, unless it
 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
 * immediately.
 *
 * \see cl_object_put(), cl_page_put()
 */
void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_object *obj;

        LINVRNT(cl_lock_invariant(env, lock));
        obj = lock->cll_descr.cld_obj;
        LINVRNT(obj != NULL);

        CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
               atomic_read(&lock->cll_ref), lock, RETIP);

        if (atomic_dec_and_test(&lock->cll_ref)) {
                if (lock->cll_state == CLS_FREEING) {
                        LASSERT(list_empty(&lock->cll_linkage));
                        cl_lock_free(env, lock);
                }
                CS_LOCK_DEC(obj, busy);
        }
}
EXPORT_SYMBOL(cl_lock_put);

/**
 * Acquires an additional reference to a lock.
 *
 * This can be called only by caller already possessing a reference to \a
 * lock.
 *
 * \see cl_object_get(), cl_page_get()
 */
void cl_lock_get(struct cl_lock *lock)
{
        LINVRNT(cl_lock_invariant(NULL, lock));
        CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
               atomic_read(&lock->cll_ref), lock, RETIP);
        atomic_inc(&lock->cll_ref);
}
EXPORT_SYMBOL(cl_lock_get);

/**
 * Acquires a reference to a lock.
 *
 * This is much like cl_lock_get(), except that this function can be used to
 * acquire initial reference to the cached lock. Caller has to deal with all
 * possible races. Use with care!
 *
 * \see cl_page_get_trust()
 */
void cl_lock_get_trust(struct cl_lock *lock)
{
        CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
               atomic_read(&lock->cll_ref), lock, RETIP);
        if (atomic_inc_return(&lock->cll_ref) == 1)
                CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
}
EXPORT_SYMBOL(cl_lock_get_trust);
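
/*
 * Reference-counting sketch (illustrative): cl_lock_get() and cl_lock_put()
 * must be balanced. cl_lock_get_trust() is reserved for code that finds a
 * lock through a cache under the appropriate guard (see cl_lock_lookup()
 * below), where the reference count may momentarily be zero.
 *
 *      cl_lock_get(lock);        caller already owns a reference
 *      ... use lock ...
 *      cl_lock_put(env, lock);   frees the lock if this was the last
 *                                reference and the lock is in CLS_FREEING
 */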

/**
 * Helper function destroying the lock that wasn't completely initialized.
 *
 * Other threads can acquire references to the top-lock through its
 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
 */
static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
{
        cl_lock_mutex_get(env, lock);
        cl_lock_cancel(env, lock);
        cl_lock_delete(env, lock);
        cl_lock_mutex_put(env, lock);
        cl_lock_put(env, lock);
}

static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
                                     struct cl_object *obj,
                                     const struct cl_io *io,
                                     const struct cl_lock_descr *descr)
{
        struct cl_lock          *lock;
        struct lu_object_header *head;

        OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, __GFP_IO);
        if (lock != NULL) {
                atomic_set(&lock->cll_ref, 1);
                lock->cll_descr = *descr;
                lock->cll_state = CLS_NEW;
                cl_object_get(obj);
                lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
                                     lock);
                INIT_LIST_HEAD(&lock->cll_layers);
                INIT_LIST_HEAD(&lock->cll_linkage);
                INIT_LIST_HEAD(&lock->cll_inclosure);
                lu_ref_init(&lock->cll_reference);
                lu_ref_init(&lock->cll_holders);
                mutex_init(&lock->cll_guard);
                lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
                init_waitqueue_head(&lock->cll_wq);
                head = obj->co_lu.lo_header;
                CS_LOCKSTATE_INC(obj, CLS_NEW);
                CS_LOCK_INC(obj, total);
                CS_LOCK_INC(obj, create);
                cl_lock_lockdep_init(lock);
                list_for_each_entry(obj, &head->loh_layers,
                                    co_lu.lo_linkage) {
                        int err;

                        err = obj->co_ops->coo_lock_init(env, obj, lock, io);
                        if (err != 0) {
                                cl_lock_finish(env, lock);
                                lock = ERR_PTR(err);
                                break;
                        }
                }
        } else
                lock = ERR_PTR(-ENOMEM);
        return lock;
}

/**
 * Transfer the lock into INTRANSIT state and return the original state.
 *
 * \pre  state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
 * \post state: CLS_INTRANSIT
 * \see CLS_INTRANSIT
 */
enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
                                     struct cl_lock *lock)
{
        enum cl_lock_state state = lock->cll_state;

        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(state != CLS_INTRANSIT);
        LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
                 "Malformed lock state %d.\n", state);

        cl_lock_state_set(env, lock, CLS_INTRANSIT);
        lock->cll_intransit_owner = current;
        cl_lock_hold_add(env, lock, "intransit", current);
        return state;
}
EXPORT_SYMBOL(cl_lock_intransit);

/**
 * Exit the intransit state and restore the lock state to the original state.
 */
void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
                       enum cl_lock_state state)
{
        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(lock->cll_state == CLS_INTRANSIT);
        LASSERT(state != CLS_INTRANSIT);
        LASSERT(lock->cll_intransit_owner == current);

        lock->cll_intransit_owner = NULL;
        cl_lock_state_set(env, lock, state);
        cl_lock_unhold(env, lock, "intransit", current);
}
EXPORT_SYMBOL(cl_lock_extransit);

/**
 * Checks whether the lock is in the INTRANSIT state, owned by another thread.
 */
int cl_lock_is_intransit(struct cl_lock *lock)
{
        LASSERT(cl_lock_is_mutexed(lock));
        return lock->cll_state == CLS_INTRANSIT &&
               lock->cll_intransit_owner != current;
}
EXPORT_SYMBOL(cl_lock_is_intransit);
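
/*
 * Sketch (illustrative) of the INTRANSIT protocol used by cl_use_try() and
 * cl_unuse_try() below: the owner parks the lock in CLS_INTRANSIT while it
 * performs a blocking state transition, then restores a concrete state.
 * Other threads see cl_lock_is_intransit() return true and wait.
 *
 *      enum cl_lock_state state;
 *
 *      state = cl_lock_intransit(env, lock); was CACHED, HELD or ENQUEUED
 *      ... perform the transition, possibly dropping the mutex ...
 *      cl_lock_extransit(env, lock, state);  restore (or set a new) state
 */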

/**
 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
 * cover multiple stripes and can trigger cascading timeouts.
 */
static int cl_lock_fits_into(const struct lu_env *env,
                             const struct cl_lock *lock,
                             const struct cl_lock_descr *need,
                             const struct cl_io *io)
{
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_invariant_trusted(env, lock));
        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_fits_into != NULL &&
                    !slice->cls_ops->clo_fits_into(env, slice, need, io))
                        return 0;
        }
        return 1;
}

static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
                                      struct cl_object *obj,
                                      const struct cl_io *io,
                                      const struct cl_lock_descr *need)
{
        struct cl_lock          *lock;
        struct cl_object_header *head;

        head = cl_object_header(obj);
        LINVRNT(spin_is_locked(&head->coh_lock_guard));
        CS_LOCK_INC(obj, lookup);
        list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
                int matched;

                matched = cl_lock_ext_match(&lock->cll_descr, need) &&
                          lock->cll_state < CLS_FREEING &&
                          lock->cll_error == 0 &&
                          !(lock->cll_flags & CLF_CANCELLED) &&
                          cl_lock_fits_into(env, lock, need, io);
                CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
                       PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
                       matched);
                if (matched) {
                        cl_lock_get_trust(lock);
                        CS_LOCK_INC(obj, hit);
                        return lock;
                }
        }
        return NULL;
}

/**
 * Returns a lock matching description \a need.
 *
 * This is the main entry point into the cl_lock caching interface. First, a
 * cache (implemented as a per-object linked list) is consulted. If lock is
 * found there, it is returned immediately. Otherwise new lock is allocated
 * and returned. In any case, additional reference to lock is acquired.
 *
 * \see cl_object_find(), cl_page_find()
 */
static struct cl_lock *cl_lock_find(const struct lu_env *env,
                                    const struct cl_io *io,
                                    const struct cl_lock_descr *need)
{
        struct cl_object_header *head;
        struct cl_object        *obj;
        struct cl_lock          *lock;

        obj  = need->cld_obj;
        head = cl_object_header(obj);

        spin_lock(&head->coh_lock_guard);
        lock = cl_lock_lookup(env, obj, io, need);
        spin_unlock(&head->coh_lock_guard);

        if (lock == NULL) {
                lock = cl_lock_alloc(env, obj, io, need);
                if (!IS_ERR(lock)) {
                        struct cl_lock *ghost;

                        spin_lock(&head->coh_lock_guard);
                        ghost = cl_lock_lookup(env, obj, io, need);
                        if (ghost == NULL) {
                                list_add_tail(&lock->cll_linkage,
                                              &head->coh_locks);
                                spin_unlock(&head->coh_lock_guard);
                                CS_LOCK_INC(obj, busy);
                        } else {
                                spin_unlock(&head->coh_lock_guard);
                                /*
                                 * Other threads can acquire references to the
                                 * top-lock through its sub-locks. Hence, it
                                 * cannot be cl_lock_free()-ed immediately.
                                 */
                                cl_lock_finish(env, lock);
                                lock = ghost;
                        }
                }
        }
        return lock;
}

/**
 * Returns existing lock matching given description. This is similar to
 * cl_lock_find() except that no new lock is created, and returned lock is
 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
 */
struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
                             const struct cl_lock_descr *need,
                             const char *scope, const void *source)
{
        struct cl_object_header *head;
        struct cl_object        *obj;
        struct cl_lock          *lock;

        obj  = need->cld_obj;
        head = cl_object_header(obj);

        do {
                spin_lock(&head->coh_lock_guard);
                lock = cl_lock_lookup(env, obj, io, need);
                spin_unlock(&head->coh_lock_guard);
                if (lock == NULL)
                        return NULL;

                cl_lock_mutex_get(env, lock);
                if (lock->cll_state == CLS_INTRANSIT)
                        /* Don't care about the return value. */
                        cl_lock_state_wait(env, lock);
                if (lock->cll_state == CLS_FREEING) {
                        cl_lock_mutex_put(env, lock);
                        cl_lock_put(env, lock);
                        lock = NULL;
                }
        } while (lock == NULL);

        cl_lock_hold_add(env, lock, scope, source);
        cl_lock_user_add(env, lock);
        if (lock->cll_state == CLS_CACHED)
                cl_use_try(env, lock, 1);
        if (lock->cll_state == CLS_HELD) {
                cl_lock_mutex_put(env, lock);
                cl_lock_lockdep_acquire(env, lock, 0);
                cl_lock_put(env, lock);
        } else {
                cl_unuse_try(env, lock);
                cl_lock_unhold(env, lock, scope, source);
                cl_lock_mutex_put(env, lock);
                cl_lock_put(env, lock);
                lock = NULL;
        }

        return lock;
}
EXPORT_SYMBOL(cl_lock_peek);
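
/*
 * Usage sketch (illustrative): a peeked lock comes back in CLS_HELD state
 * with a hold and a user reference, and is released with the usual
 * unuse/release sequence. "descr" is a hypothetical pre-filled descriptor;
 * cl_lock_release() is defined later in this file.
 *
 *      lock = cl_lock_peek(env, io, &descr, "glimpse", current);
 *      if (lock != NULL) {
 *              ... inspect state protected by the lock ...
 *              cl_unuse(env, lock);
 *              cl_lock_release(env, lock, "glimpse", current);
 *      }
 */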

/**
 * Returns a slice within a lock, corresponding to the given layer in the
 * device stack.
 *
 * \see cl_page_at()
 */
const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
                                       const struct lu_device_type *dtype)
{
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_invariant_trusted(NULL, lock));

        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
                        return slice;
        }
        return NULL;
}
EXPORT_SYMBOL(cl_lock_at);
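
/*
 * Usage sketch (illustrative): a layer finds its own slice in a compound
 * lock by device type. "my_device_type" and "my_lock" are hypothetical
 * names for a particular layer's lu_device_type and private lock structure.
 *
 *      const struct cl_lock_slice *slice;
 *
 *      slice = cl_lock_at(lock, &my_device_type);
 *      if (slice != NULL) {
 *              struct my_lock *mlk;
 *
 *              mlk = container_of(slice, struct my_lock, mlk_cl);
 *              ... use the layer-private state ...
 *      }
 */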

static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_thread_counters *counters;

        counters = cl_lock_counters(env, lock);
        lock->cll_depth++;
        counters->ctc_nr_locks_locked++;
        lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
        cl_lock_trace(D_TRACE, env, "got mutex", lock);
}

/**
 * Locks cl_lock object.
 *
 * This is used to manipulate cl_lock fields, and to serialize state
 * transitions in the lock state machine.
 *
 * \post cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_mutex_put()
 */
void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_invariant(env, lock));

        if (lock->cll_guarder == current) {
                LINVRNT(cl_lock_is_mutexed(lock));
                LINVRNT(lock->cll_depth > 0);
        } else {
                struct cl_object_header *hdr;
                struct cl_thread_info   *info;
                int i;

                LINVRNT(lock->cll_guarder != current);
                hdr = cl_object_header(lock->cll_descr.cld_obj);
                /*
                 * Check that mutexes are taken in the bottom-to-top order.
                 */
                info = cl_env_info(env);
                for (i = 0; i < hdr->coh_nesting; ++i)
                        LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
                mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
                lock->cll_guarder = current;
                LINVRNT(lock->cll_depth == 0);
        }
        cl_lock_mutex_tail(env, lock);
}
EXPORT_SYMBOL(cl_lock_mutex_get);

/**
 * Try-locks cl_lock object.
 *
 * \retval 0      \a lock was successfully locked
 *
 * \retval -EBUSY \a lock cannot be locked right now
 *
 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
 *
 * \see cl_lock_mutex_get()
 */
int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
{
        int result;

        LINVRNT(cl_lock_invariant_trusted(env, lock));

        result = 0;
        if (lock->cll_guarder == current) {
                LINVRNT(lock->cll_depth > 0);
                cl_lock_mutex_tail(env, lock);
        } else if (mutex_trylock(&lock->cll_guard)) {
                LINVRNT(lock->cll_depth == 0);
                lock->cll_guarder = current;
                cl_lock_mutex_tail(env, lock);
        } else
                result = -EBUSY;
        return result;
}
EXPORT_SYMBOL(cl_lock_mutex_try);

/**
 * Unlocks cl_lock object.
 *
 * \pre cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_mutex_get()
 */
void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_thread_counters *counters;

        LINVRNT(cl_lock_invariant(env, lock));
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(lock->cll_guarder == current);
        LINVRNT(lock->cll_depth > 0);

        counters = cl_lock_counters(env, lock);
        LINVRNT(counters->ctc_nr_locks_locked > 0);

        cl_lock_trace(D_TRACE, env, "put mutex", lock);
        lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
        counters->ctc_nr_locks_locked--;
        if (--lock->cll_depth == 0) {
                lock->cll_guarder = NULL;
                mutex_unlock(&lock->cll_guard);
        }
}
EXPORT_SYMBOL(cl_lock_mutex_put);
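
/*
 * Locking sketch (illustrative): the cl_lock mutex is recursive for its
 * owner (tracked via cll_guarder and cll_depth), so a helper called with
 * the mutex already held may take it again without blocking:
 *
 *      cl_lock_mutex_get(env, lock);       depth 1, mutex acquired
 *      cl_lock_mutex_get(env, lock);       depth 2, no blocking
 *      LASSERT(cl_lock_is_mutexed(lock));
 *      cl_lock_mutex_put(env, lock);       depth 1
 *      cl_lock_mutex_put(env, lock);       depth 0, mutex released
 */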

/**
 * Returns true iff lock's mutex is owned by the current thread.
 */
int cl_lock_is_mutexed(struct cl_lock *lock)
{
        return lock->cll_guarder == current;
}
EXPORT_SYMBOL(cl_lock_is_mutexed);

/**
 * Returns number of cl_lock mutexes held by the current thread (environment).
 */
int cl_lock_nr_mutexed(const struct lu_env *env)
{
        struct cl_thread_info *info;
        int i;
        int locked;

        /*
         * NOTE: if summation across all nesting levels (currently 2) proves
         *       too expensive, a summary counter can be added to
         *       struct cl_thread_info.
         */
        info = cl_env_info(env);
        for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
                locked += info->clt_counters[i].ctc_nr_locks_locked;
        return locked;
}
EXPORT_SYMBOL(cl_lock_nr_mutexed);

static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        if (!(lock->cll_flags & CLF_CANCELLED)) {
                const struct cl_lock_slice *slice;

                lock->cll_flags |= CLF_CANCELLED;
                list_for_each_entry_reverse(slice, &lock->cll_layers,
                                            cls_linkage) {
                        if (slice->cls_ops->clo_cancel != NULL)
                                slice->cls_ops->clo_cancel(env, slice);
                }
        }
}

static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_object_header    *head;
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        if (lock->cll_state < CLS_FREEING) {
                LASSERT(lock->cll_state != CLS_INTRANSIT);
                cl_lock_state_set(env, lock, CLS_FREEING);

                head = cl_object_header(lock->cll_descr.cld_obj);

                spin_lock(&head->coh_lock_guard);
                list_del_init(&lock->cll_linkage);
                spin_unlock(&head->coh_lock_guard);

                /*
                 * From now on, no new references to this lock can be acquired
                 * by cl_lock_lookup().
                 */
                list_for_each_entry_reverse(slice, &lock->cll_layers,
                                            cls_linkage) {
                        if (slice->cls_ops->clo_delete != NULL)
                                slice->cls_ops->clo_delete(env, slice);
                }
                /*
                 * From now on, no new references to this lock can be acquired
                 * by layer-specific means (like a pointer from struct
                 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
                 * lov).
                 *
                 * Lock will be finally freed in cl_lock_put() when last of
                 * existing references goes away.
                 */
        }
}

/**
 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
 * top-lock (nesting == 0) accounts for this modification in the per-thread
 * debugging counters. Sub-lock holds can be released by a thread different
 * from one that acquired it.
 */
static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
                             int delta)
{
        struct cl_thread_counters *counters;
        enum clt_nesting_level     nesting;

        lock->cll_holds += delta;
        nesting = cl_lock_nesting(lock);
        if (nesting == CNL_TOP) {
                counters = &cl_env_info(env)->clt_counters[CNL_TOP];
                counters->ctc_nr_held += delta;
                LASSERT(counters->ctc_nr_held >= 0);
        }
}

/**
 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
 * cl_lock_hold_mod() for the explanation of the debugging code.
 */
static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
                             int delta)
{
        struct cl_thread_counters *counters;
        enum clt_nesting_level     nesting;

        lock->cll_users += delta;
        nesting = cl_lock_nesting(lock);
        if (nesting == CNL_TOP) {
                counters = &cl_env_info(env)->clt_counters[CNL_TOP];
                counters->ctc_nr_used += delta;
                LASSERT(counters->ctc_nr_used >= 0);
        }
}

void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
                          const char *scope, const void *source)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_holds > 0);

        cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
        lu_ref_del(&lock->cll_holders, scope, source);
        cl_lock_hold_mod(env, lock, -1);
        if (lock->cll_holds == 0) {
                CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
                if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
                    lock->cll_descr.cld_mode == CLM_GROUP ||
                    lock->cll_state != CLS_CACHED)
                        /*
                         * If the lock is still a phantom or group lock when
                         * the user is done with it, destroy the lock.
                         */
                        lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
                if (lock->cll_flags & CLF_CANCELPEND) {
                        lock->cll_flags &= ~CLF_CANCELPEND;
                        cl_lock_cancel0(env, lock);
                }
                if (lock->cll_flags & CLF_DOOMED) {
                        /* no longer doomed: it's dead... Jim. */
                        lock->cll_flags &= ~CLF_DOOMED;
                        cl_lock_delete0(env, lock);
                }
        }
}
EXPORT_SYMBOL(cl_lock_hold_release);

/**
 * Waits until lock state is changed.
 *
 * This function is called with cl_lock mutex locked, atomically releases
 * mutex and goes to sleep, waiting for a lock state change (signaled by
 * cl_lock_signal()), and re-acquires the mutex before return.
 *
 * This function is used to wait until lock state machine makes some progress
 * and to emulate synchronous operations on top of asynchronous lock
 * interface.
 *
 * \retval -ERESTARTSYS wait was interrupted
 *
 * \retval 0 wait wasn't interrupted
 *
 * \pre cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_signal()
 */
int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
{
        wait_queue_t waiter;
        sigset_t blocked;
        int result;

        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_depth == 1);
        LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */

        cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
        result = lock->cll_error;
        if (result == 0) {
                /* To avoid being interrupted by 'non-fatal' signals
                 * (SIGCHLD, for instance), we block them temporarily.
                 * LU-305 */
                blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);

                init_waitqueue_entry_current(&waiter);
                add_wait_queue(&lock->cll_wq, &waiter);
                set_current_state(TASK_INTERRUPTIBLE);
                cl_lock_mutex_put(env, lock);

                LASSERT(cl_lock_nr_mutexed(env) == 0);

                /* Return -ERESTARTSYS instead of -EINTR so that syscalls
                 * can be restarted if signals are pending here. */
                result = -ERESTARTSYS;
                if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
                        waitq_wait(&waiter, TASK_INTERRUPTIBLE);
                        if (!cfs_signal_pending())
                                result = 0;
                }

                cl_lock_mutex_get(env, lock);
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&lock->cll_wq, &waiter);

                /* Restore old blocked signals */
                cfs_restore_sigs(blocked);
        }
        return result;
}
EXPORT_SYMBOL(cl_lock_state_wait);
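
/*
 * Usage sketch (illustrative): cl_lock_state_wait() is the building block
 * of the synchronous retry loops used throughout this file (see cl_wait()
 * and cl_enqueue_locked() below). "some_try_function" is a placeholder for
 * cl_enqueue_try(), cl_wait_try() and friends.
 *
 *      while ((result = some_try_function(env, lock, ...)) == CLO_WAIT) {
 *              result = cl_lock_state_wait(env, lock);
 *              if (result != 0)
 *                      break;  interrupted by a fatal signal
 *      }
 */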

static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
                                 enum cl_lock_state state)
{
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
                if (slice->cls_ops->clo_state != NULL)
                        slice->cls_ops->clo_state(env, slice, state);
        wake_up_all(&lock->cll_wq);
}

/**
 * Notifies waiters that lock state changed.
 *
 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
 * layers about state change by calling cl_lock_operations::clo_state()
 * top-to-bottom.
 */
void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
{
        cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
        cl_lock_state_signal(env, lock, lock->cll_state);
}
EXPORT_SYMBOL(cl_lock_signal);

/**
 * Changes lock state.
 *
 * This function is invoked to notify layers that lock state changed,
 * possibly as a result of an asynchronous event such as call-back reception.
 *
 * \post lock->cll_state == state
 *
 * \see cl_lock_operations::clo_state()
 */
void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
                       enum cl_lock_state state)
{
        LASSERT(lock->cll_state <= state ||
                (lock->cll_state == CLS_CACHED &&
                 (state == CLS_HELD || /* lock found in cache */
                  state == CLS_NEW ||  /* sub-lock canceled */
                  state == CLS_INTRANSIT)) ||
                /* lock is in transit state */
                lock->cll_state == CLS_INTRANSIT);

        if (lock->cll_state != state) {
                CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
                CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);

                cl_lock_state_signal(env, lock, state);
                lock->cll_state = state;
        }
}
EXPORT_SYMBOL(cl_lock_state_set);

static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;
        int result;

        do {
                result = 0;

                LINVRNT(cl_lock_is_mutexed(lock));
                LINVRNT(cl_lock_invariant(env, lock));
                LASSERT(lock->cll_state == CLS_INTRANSIT);

                result = -ENOSYS;
                list_for_each_entry_reverse(slice, &lock->cll_layers,
                                            cls_linkage) {
                        if (slice->cls_ops->clo_unuse != NULL) {
                                result = slice->cls_ops->clo_unuse(env, slice);
                                if (result != 0)
                                        break;
                        }
                }
                LASSERT(result != -ENOSYS);
        } while (result == CLO_REPEAT);

        return result;
}

/**
 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
 *
 * If \a atomic is set, the lock is unused again on failure, so that the
 * whole use operation either succeeds or leaves the lock unused.
 */
int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
{
        const struct cl_lock_slice *slice;
        int result;
        enum cl_lock_state state;

        cl_lock_trace(D_DLMTRACE, env, "use lock", lock);

        LASSERT(lock->cll_state == CLS_CACHED);
        if (lock->cll_error)
                return lock->cll_error;

        result = -ENOSYS;
        state = cl_lock_intransit(env, lock);
        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_use != NULL) {
                        result = slice->cls_ops->clo_use(env, slice);
                        if (result != 0)
                                break;
                }
        }
        LASSERT(result != -ENOSYS);

        LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
                 lock->cll_state);

        if (result == 0) {
                state = CLS_HELD;
        } else {
                if (result == -ESTALE) {
                        /*
                         * -ESTALE means a sublock is being cancelled at this
                         * time; set the lock state back to NEW and ask the
                         * caller to repeat.
                         */
                        state = CLS_NEW;
                        result = CLO_REPEAT;
                }

                /* @atomic means back-off-on-failure. */
                if (atomic) {
                        int rc;

                        rc = cl_unuse_try_internal(env, lock);
                        /* Vet the results. */
                        if (rc < 0 && result > 0)
                                result = rc;
                }
        }
        cl_lock_extransit(env, lock, state);
        return result;
}
EXPORT_SYMBOL(cl_use_try);
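
/*
 * Usage sketch (illustrative): cl_lock_peek() above calls cl_use_try()
 * with atomic == 1 so that a cached lock either becomes fully usable or is
 * rolled back, while cl_enqueue_try() below passes atomic == 0 and relies
 * on its own retry loop.
 *
 *      if (lock->cll_state == CLS_CACHED)
 *              result = cl_use_try(env, lock, 1);  all-or-nothing
 */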

/**
 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
 * top-to-bottom.
 */
static int cl_enqueue_kick(const struct lu_env *env,
                           struct cl_lock *lock,
                           struct cl_io *io, __u32 flags)
{
        int result;
        const struct cl_lock_slice *slice;

        result = -ENOSYS;
        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_enqueue != NULL) {
                        result = slice->cls_ops->clo_enqueue(env,
                                                             slice, io, flags);
                        if (result != 0)
                                break;
                }
        }
        LASSERT(result != -ENOSYS);
        return result;
}

/**
 * Tries to enqueue a lock.
 *
 * This function is called repeatedly by cl_enqueue() until either lock is
 * enqueued, or error occurs. This function does not block waiting for
 * networking communication to complete.
 *
 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
 *                         lock->cll_state == CLS_HELD)
 *
 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
 * \see cl_lock_state::CLS_ENQUEUED
 */
int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
                   struct cl_io *io, __u32 flags)
{
        int result;

        cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
        do {
                LINVRNT(cl_lock_is_mutexed(lock));

                result = lock->cll_error;
                if (result != 0)
                        break;

                switch (lock->cll_state) {
                case CLS_NEW:
                        cl_lock_state_set(env, lock, CLS_QUEUING);
                        /* fall-through */
                case CLS_QUEUING:
                        /* kick layers. */
                        result = cl_enqueue_kick(env, lock, io, flags);
                        /* For AGL case, the cl_lock::cll_state may
                         * become CLS_HELD already. */
                        if (result == 0 && lock->cll_state == CLS_QUEUING)
                                cl_lock_state_set(env, lock, CLS_ENQUEUED);
                        break;
                case CLS_INTRANSIT:
                        LASSERT(cl_lock_is_intransit(lock));
                        result = CLO_WAIT;
                        break;
                case CLS_CACHED:
                        /* yank lock from the cache. */
                        result = cl_use_try(env, lock, 0);
                        break;
                case CLS_ENQUEUED:
                case CLS_HELD:
                        result = 0;
                        break;
                default:
                case CLS_FREEING:
                        /*
                         * impossible, only held locks with increased
                         * ->cll_holds can be enqueued, and they cannot be
                         * freed.
                         */
                        LBUG();
                }
        } while (result == CLO_REPEAT);
        return result;
}
EXPORT_SYMBOL(cl_enqueue_try);

/**
 * Cancel the conflicting lock found during previous enqueue.
 *
 * \retval 0 conflicting lock has been canceled.
 * \retval -ve error code.
 */
int cl_lock_enqueue_wait(const struct lu_env *env,
                         struct cl_lock *lock,
                         int keep_mutex)
{
        struct cl_lock *conflict;
        int rc = 0;

        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(lock->cll_state == CLS_QUEUING);
        LASSERT(lock->cll_conflict != NULL);

        conflict = lock->cll_conflict;
        lock->cll_conflict = NULL;

        cl_lock_mutex_put(env, lock);
        LASSERT(cl_lock_nr_mutexed(env) == 0);

        cl_lock_mutex_get(env, conflict);
        cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
        cl_lock_cancel(env, conflict);
        cl_lock_delete(env, conflict);

        while (conflict->cll_state != CLS_FREEING) {
                rc = cl_lock_state_wait(env, conflict);
                if (rc != 0)
                        break;
        }
        cl_lock_mutex_put(env, conflict);
        lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
        cl_lock_put(env, conflict);

        if (keep_mutex)
                cl_lock_mutex_get(env, lock);

        LASSERT(rc <= 0);
        return rc;
}
EXPORT_SYMBOL(cl_lock_enqueue_wait);

static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
                             struct cl_io *io, __u32 enqflags)
{
        int result;

        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_holds > 0);

        cl_lock_user_add(env, lock);
        do {
                result = cl_enqueue_try(env, lock, io, enqflags);
                if (result == CLO_WAIT) {
                        if (lock->cll_conflict != NULL)
                                result = cl_lock_enqueue_wait(env, lock, 1);
                        else
                                result = cl_lock_state_wait(env, lock);
                        if (result == 0)
                                continue;
                }
                break;
        } while (1);
        if (result != 0)
                cl_unuse_try(env, lock);
        LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
                     lock->cll_state == CLS_ENQUEUED ||
                     lock->cll_state == CLS_HELD));
        return result;
}

/**
 * Enqueues a lock.
 *
 * \pre current thread or io owns a hold on lock.
 *
 * \post ergo(result == 0, lock->users increased)
 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
 *                         lock->cll_state == CLS_HELD)
 */
int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
               struct cl_io *io, __u32 enqflags)
{
        int result;

        cl_lock_lockdep_acquire(env, lock, enqflags);
        cl_lock_mutex_get(env, lock);
        result = cl_enqueue_locked(env, lock, io, enqflags);
        cl_lock_mutex_put(env, lock);
        if (result != 0)
                cl_lock_lockdep_release(env, lock);
        LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
                     lock->cll_state == CLS_HELD));
        return result;
}
EXPORT_SYMBOL(cl_enqueue);
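
/*
 * Lifecycle sketch (illustrative): a typical synchronous caller holds the
 * lock, enqueues it, waits until it is granted, performs I/O and unuses
 * the lock. Error handling is elided; "io" and "enqflags" are assumed to
 * come from the surrounding cl_io machinery.
 *
 *      result = cl_enqueue(env, lock, io, enqflags);
 *      if (result == 0) {
 *              result = cl_wait(env, lock);  CLS_HELD on success
 *              ... do I/O under the lock ...
 *              cl_unuse(env, lock);
 *      }
 */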

/**
 * Tries to unlock a lock.
 *
 * This function is called to release the underlying resource:
 * 1. for a top-lock, the resources are the sub-locks it holds;
 * 2. for a sub-lock, the resource is the reference to the dlmlock.
 *
 * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
 *
 * \see cl_unuse() cl_lock_operations::clo_unuse()
 * \see cl_lock_state::CLS_CACHED
 */
int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
{
        int result;
        enum cl_lock_state state = CLS_NEW;

        cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);

        if (lock->cll_users > 1) {
                cl_lock_user_del(env, lock);
                return 0;
        }

        /* Only a lock in CLS_HELD or CLS_ENQUEUED state can hold underlying
         * resources. */
        if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
                cl_lock_user_del(env, lock);
                return 0;
        }

        /*
         * New lock users (->cll_users) do not prevent unlocking from
         * proceeding. From this point on, the lock eventually reaches
         * CLS_CACHED, is reinitialized to CLS_NEW, or falls into
         * CLS_FREEING.
         */
        state = cl_lock_intransit(env, lock);

        result = cl_unuse_try_internal(env, lock);
        LASSERT(lock->cll_state == CLS_INTRANSIT);
        LASSERT(result != CLO_WAIT);
        cl_lock_user_del(env, lock);
        if (result == 0 || result == -ESTALE) {
                /*
                 * Return lock back to the cache. This is the only
                 * place where lock is moved into CLS_CACHED state.
                 *
                 * If one of ->clo_unuse() methods returned -ESTALE, lock
                 * cannot be placed into cache and has to be
                 * re-initialized. This happens e.g., when a sub-lock was
                 * canceled while unlocking was in progress.
                 */
                if (state == CLS_HELD && result == 0)
                        state = CLS_CACHED;
                else
                        state = CLS_NEW;
                cl_lock_extransit(env, lock, state);

                /*
                 * Hide the -ESTALE error.
                 * Suppose the lock is a glimpse lock with multiple stripes,
                 * one of its sub-locks returned -ENAVAIL, and the other
                 * sub-locks matched write locks. In this case we cannot set
                 * this lock to error, because otherwise some of its sub-locks
                 * may not be canceled, and dirty pages covered by them would
                 * never be written to the OSTs. -jay
                 */
                result = 0;
        } else {
                CERROR("result = %d, this is unlikely!\n", result);
                state = CLS_NEW;
                cl_lock_extransit(env, lock, state);
        }
        return result ?: lock->cll_error;
}
EXPORT_SYMBOL(cl_unuse_try);

static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
{
        int result;

        result = cl_unuse_try(env, lock);
        if (result)
                CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
}

/**
 * Unlocks a lock.
 */
void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
{
        cl_lock_mutex_get(env, lock);
        cl_unuse_locked(env, lock);
        cl_lock_mutex_put(env, lock);
        cl_lock_lockdep_release(env, lock);
}
EXPORT_SYMBOL(cl_unuse);

/**
 * Tries to wait for a lock.
 *
 * This function is called repeatedly by cl_wait() until either lock is
 * granted, or error occurs. This function does not block waiting for network
 * communication to complete.
 *
 * \see cl_wait() cl_lock_operations::clo_wait()
 * \see cl_lock_state::CLS_HELD
 */
int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;
        int result;

        cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
        do {
                LINVRNT(cl_lock_is_mutexed(lock));
                LINVRNT(cl_lock_invariant(env, lock));
                LASSERTF(lock->cll_state == CLS_QUEUING ||
                         lock->cll_state == CLS_ENQUEUED ||
                         lock->cll_state == CLS_HELD ||
                         lock->cll_state == CLS_INTRANSIT,
                         "lock state: %d\n", lock->cll_state);
                LASSERT(lock->cll_users > 0);
                LASSERT(lock->cll_holds > 0);

                result = lock->cll_error;
                if (result != 0)
                        break;

                if (cl_lock_is_intransit(lock)) {
                        result = CLO_WAIT;
                        break;
                }

                if (lock->cll_state == CLS_HELD)
                        /* nothing to do */
                        break;

                result = -ENOSYS;
                list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                        if (slice->cls_ops->clo_wait != NULL) {
                                result = slice->cls_ops->clo_wait(env, slice);
                                if (result != 0)
                                        break;
                        }
                }
                LASSERT(result != -ENOSYS);
                if (result == 0) {
                        LASSERT(lock->cll_state != CLS_INTRANSIT);
                        cl_lock_state_set(env, lock, CLS_HELD);
                }
        } while (result == CLO_REPEAT);
        return result;
}
EXPORT_SYMBOL(cl_wait_try);

/**
 * Waits until an enqueued lock is granted.
 *
 * \pre current thread or io owns a hold on the lock
 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
 *                        lock->cll_state == CLS_HELD)
 *
 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
 */
int cl_wait(const struct lu_env *env, struct cl_lock *lock)
{
        int result;

        cl_lock_mutex_get(env, lock);

        LINVRNT(cl_lock_invariant(env, lock));
        LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
                 "Wrong state %d\n", lock->cll_state);
        LASSERT(lock->cll_holds > 0);

        do {
                result = cl_wait_try(env, lock);
                if (result == CLO_WAIT) {
                        result = cl_lock_state_wait(env, lock);
                        if (result == 0)
                                continue;
                }
                break;
        } while (1);
        if (result < 0) {
                cl_unuse_try(env, lock);
                cl_lock_lockdep_release(env, lock);
        }
        cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
        cl_lock_mutex_put(env, lock);
        LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
        return result;
}
EXPORT_SYMBOL(cl_wait);
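
/*
 * Usage sketch (an illustrative addition, not part of the original code):
 * the typical enqueue/wait pairing around cl_wait(). cl_lock_hold(),
 * cl_unuse() and cl_lock_release() are defined in this file; cl_enqueue()
 * is assumed to be the public wrapper around the cl_enqueue_locked() call
 * seen in cl_lock_request() below. Error handling is trimmed.
 *
 *      struct cl_lock *lock;
 *      int rc;
 *
 *      lock = cl_lock_hold(env, io, need, "example", current);
 *      if (IS_ERR(lock))
 *              return PTR_ERR(lock);
 *      rc = cl_enqueue(env, lock, io, need->cld_enq_flags);
 *      if (rc == 0)
 *              rc = cl_wait(env, lock);
 *      if (rc == 0) {
 *              ... the lock is CLS_HELD; do work under it ...
 *              cl_unuse(env, lock);
 *      }
 *      cl_lock_release(env, lock, "example", current);
 *
 * On failure cl_wait() has already unused the lock (see above), so only the
 * hold and reference need to be dropped.
 */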

/**
 * Executes cl_lock_operations::clo_weigh() across all layers and sums the
 * results to estimate lock value.
 */
unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;
        unsigned long pound;
        unsigned long ounce;

        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        pound = 0;
        list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_weigh != NULL) {
                        ounce = slice->cls_ops->clo_weigh(env, slice);
                        pound += ounce;
                        if (pound < ounce) /* saturate on overflow */
                                pound = ~0UL;
                }
        }
        return pound;
}
EXPORT_SYMBOL(cl_lock_weigh);
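
/*
 * Illustrative sketch (not part of the original code) of a layer's
 * cl_lock_operations::clo_weigh() method; "example_lock" and its fields
 * are made-up names. Each layer returns its own estimate and
 * cl_lock_weigh() adds them up, saturating at ~0UL on overflow.
 *
 *      static unsigned long example_lock_weigh(const struct lu_env *env,
 *                                              const struct cl_lock_slice *slice)
 *      {
 *              struct example_lock *el;
 *
 *              el = container_of(slice, struct example_lock, el_cl);
 *              return el->el_nr_cached_pages;
 *      }
 */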

/**
 * Notifies layers that the lock description changed.
 *
 * The server can grant the client a lock different from the one that was
 * requested (e.g., larger in extent). This method is called when the
 * actually granted lock description becomes known, to let layers
 * accommodate the changed lock description.
 *
 * \see cl_lock_operations::clo_modify()
 */
int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
                   const struct cl_lock_descr *desc)
{
        const struct cl_lock_slice *slice;
        struct cl_object           *obj = lock->cll_descr.cld_obj;
        struct cl_object_header    *hdr = cl_object_header(obj);
        int result;

        cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
        /* don't allow the object to change */
        LASSERT(obj == desc->cld_obj);
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_modify != NULL) {
                        result = slice->cls_ops->clo_modify(env, slice, desc);
                        if (result != 0)
                                return result;
                }
        }
        CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
                      PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
        /*
         * Just replace the description in place. Nothing more is needed for
         * now. If locks were indexed according to their extent and/or mode,
         * that index would have to be updated here.
         */
        spin_lock(&hdr->coh_lock_guard);
        lock->cll_descr = *desc;
        spin_unlock(&hdr->coh_lock_guard);
        return 0;
}
EXPORT_SYMBOL(cl_lock_modify);
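
/*
 * Illustrative sketch (not part of the original code): inside a layer
 * method, where env and lock are available, a caller that learns the
 * actually granted extent builds an updated descriptor and applies it.
 * The extent values here are made up; only the object must stay the same.
 *
 *      struct cl_lock_descr descr = lock->cll_descr;
 *      int result;
 *
 *      descr.cld_start = 0;
 *      descr.cld_end   = CL_PAGE_EOF;
 *      result = cl_lock_modify(env, lock, &descr);
 */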

/**
 * Initializes a lock closure with a given origin.
 *
 * \see cl_lock_closure
 */
void cl_lock_closure_init(const struct lu_env *env,
                          struct cl_lock_closure *closure,
                          struct cl_lock *origin, int wait)
{
        LINVRNT(cl_lock_is_mutexed(origin));
        LINVRNT(cl_lock_invariant(env, origin));

        INIT_LIST_HEAD(&closure->clc_list);
        closure->clc_origin = origin;
        closure->clc_wait   = wait;
        closure->clc_nr     = 0;
}
EXPORT_SYMBOL(cl_lock_closure_init);

/**
 * Builds a closure of \a lock.
 *
 * Building a closure consists of adding the initial lock (\a lock) into it,
 * and calling the cl_lock_operations::clo_closure() methods of \a lock.
 * These methods might call cl_lock_closure_build() recursively again,
 * adding more locks to the closure, etc.
 *
 * \see cl_lock_closure
 */
int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
                          struct cl_lock_closure *closure)
{
        const struct cl_lock_slice *slice;
        int result;

        LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
        LINVRNT(cl_lock_invariant(env, closure->clc_origin));

        result = cl_lock_enclosure(env, lock, closure);
        if (result == 0) {
                list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                        if (slice->cls_ops->clo_closure != NULL) {
                                result = slice->cls_ops->clo_closure(env, slice,
                                                                     closure);
                                if (result != 0)
                                        break;
                        }
                }
        }
        if (result != 0)
                cl_lock_disclosure(env, closure);
        return result;
}
EXPORT_SYMBOL(cl_lock_closure_build);

/**
 * Adds a new lock to a closure.
 *
 * Try-locks \a lock and, if this succeeds, adds it to the closure (never
 * more than once). If the try-lock fails, returns CLO_REPEAT, after
 * optionally waiting until the next try-lock is likely to succeed.
 */
int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
                      struct cl_lock_closure *closure)
{
        int result = 0;

        cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
        if (!cl_lock_mutex_try(env, lock)) {
                /*
                 * If lock->cll_inclosure is not empty, the lock is already
                 * in this closure.
                 */
                if (list_empty(&lock->cll_inclosure)) {
                        cl_lock_get_trust(lock);
                        lu_ref_add(&lock->cll_reference, "closure", closure);
                        list_add(&lock->cll_inclosure, &closure->clc_list);
                        closure->clc_nr++;
                } else {
                        cl_lock_mutex_put(env, lock);
                }
                result = 0;
        } else {
                cl_lock_disclosure(env, closure);
                if (closure->clc_wait) {
                        cl_lock_get_trust(lock);
                        lu_ref_add(&lock->cll_reference, "closure-w", closure);
                        cl_lock_mutex_put(env, closure->clc_origin);

                        LASSERT(cl_lock_nr_mutexed(env) == 0);
                        cl_lock_mutex_get(env, lock);
                        cl_lock_mutex_put(env, lock);

                        cl_lock_mutex_get(env, closure->clc_origin);
                        lu_ref_del(&lock->cll_reference, "closure-w", closure);
                        cl_lock_put(env, lock);
                }
                result = CLO_REPEAT;
        }
        return result;
}
EXPORT_SYMBOL(cl_lock_enclosure);

/** Releases mutexes of enclosed locks. */
void cl_lock_disclosure(const struct lu_env *env,
                        struct cl_lock_closure *closure)
{
        struct cl_lock *scan;
        struct cl_lock *temp;

        cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
        list_for_each_entry_safe(scan, temp, &closure->clc_list,
                                 cll_inclosure) {
                list_del_init(&scan->cll_inclosure);
                cl_lock_mutex_put(env, scan);
                lu_ref_del(&scan->cll_reference, "closure", closure);
                cl_lock_put(env, scan);
                closure->clc_nr--;
        }
        LASSERT(closure->clc_nr == 0);
}
EXPORT_SYMBOL(cl_lock_disclosure);

/** Finalizes a closure. */
void cl_lock_closure_fini(struct cl_lock_closure *closure)
{
        LASSERT(closure->clc_nr == 0);
        LASSERT(list_empty(&closure->clc_list));
}
EXPORT_SYMBOL(cl_lock_closure_fini);
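
/*
 * Closure lifecycle sketch (an illustrative addition, not part of the
 * original code): take the mutexes of a group of related locks, work on
 * them, then release everything. "origin" is assumed to be mutexed by the
 * caller, as cl_lock_closure_init() requires; the CLO_REPEAT retry follows
 * the contract of cl_lock_enclosure() above.
 *
 *      struct cl_lock_closure closure;
 *      int rc;
 *
 *      cl_lock_closure_init(env, &closure, origin, 1);
 *      do {
 *              rc = cl_lock_closure_build(env, lock, &closure);
 *      } while (rc == CLO_REPEAT);
 *      if (rc == 0) {
 *              ... all enclosed locks are mutexed here ...
 *              cl_lock_disclosure(env, &closure);
 *      }
 *      cl_lock_closure_fini(&closure);
 */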

/**
 * Destroys this lock. Notifies layers (bottom-to-top) that the lock is
 * being destroyed, then destroys it. If there are holds on the lock, its
 * destruction is postponed until all holds are released. This is called
 * when a decision is made to destroy the lock in the future, e.g., when a
 * blocking AST is received on it, or a fatal communication error happens.
 *
 * Caller must have a reference on this lock to prevent a situation where a
 * deleted lock lingers in memory for an indefinite time, because nobody
 * calls cl_lock_put() to finish it.
 *
 * \pre atomic_read(&lock->cll_ref) > 0
 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
 *           cl_lock_nr_mutexed(env) == 1)
 *      [i.e., if a top-lock is deleted, mutexes of no other locks can be
 *      held, as deletion of sub-locks might require releasing a top-lock
 *      mutex]
 *
 * \see cl_lock_operations::clo_delete()
 * \see cl_lock::cll_holds
 */
void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
                     cl_lock_nr_mutexed(env) == 1));

        cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
        if (lock->cll_holds == 0)
                cl_lock_delete0(env, lock);
        else
                lock->cll_flags |= CLF_DOOMED;
}
EXPORT_SYMBOL(cl_lock_delete);
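
/*
 * Illustrative sketch (not part of the original code): a typical reaction
 * to a blocking AST, mirroring the cancel-then-delete sequence used by
 * cl_lock_error() below. If the lock still has holds, only CLF_CANCELPEND
 * and CLF_DOOMED are set, and the real work happens once the last hold is
 * released.
 *
 *      cl_lock_mutex_get(env, lock);
 *      cl_lock_cancel(env, lock);
 *      cl_lock_delete(env, lock);
 *      cl_lock_mutex_put(env, lock);
 */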

/**
 * Marks the lock as irrecoverably failed, and marks it for destruction.
 * This happens when, e.g., the server fails to grant a lock to us, or a
 * network timeout happens.
 *
 * \pre atomic_read(&lock->cll_ref) > 0
 *
 * \see cl_lock_delete()
 * \see cl_lock::cll_holds
 */
void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        if (lock->cll_error == 0 && error != 0) {
                cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
                lock->cll_error = error;
                cl_lock_signal(env, lock);
                cl_lock_cancel(env, lock);
                cl_lock_delete(env, lock);
        }
}
EXPORT_SYMBOL(cl_lock_error);

/**
 * Cancels this lock. Notifies layers (bottom-to-top) that the lock is being
 * cancelled, then destroys it. If there are holds on the lock, cancellation
 * is postponed until all holds are released.
 *
 * Cancellation notification is delivered to layers at most once.
 *
 * \see cl_lock_operations::clo_cancel()
 * \see cl_lock::cll_holds
 */
void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
        if (lock->cll_holds == 0)
                cl_lock_cancel0(env, lock);
        else
                lock->cll_flags |= CLF_CANCELPEND;
}
EXPORT_SYMBOL(cl_lock_cancel);

/**
 * Finds an existing lock covering the given page index, optionally
 * different from a given \a except lock.
 */
struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
                                 struct cl_object *obj, pgoff_t index,
                                 struct cl_lock *except,
                                 int pending, int canceld)
{
        struct cl_object_header *head;
        struct cl_lock          *scan;
        struct cl_lock          *lock;
        struct cl_lock_descr    *need;

        head = cl_object_header(obj);
        need = &cl_env_info(env)->clt_descr;
        lock = NULL;

        need->cld_mode = CLM_READ; /* CLM_READ matches both READ and WRITE,
                                    * but not PHANTOM */
        need->cld_start = need->cld_end = index;
        need->cld_enq_flags = 0;

        spin_lock(&head->coh_lock_guard);
        /* It is fine to match any group lock since there could be only one
         * with a unique gid, and it conflicts with all other lock modes
         * too. */
        list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
                if (scan != except &&
                    (scan->cll_descr.cld_mode == CLM_GROUP ||
                    cl_lock_ext_match(&scan->cll_descr, need)) &&
                    scan->cll_state >= CLS_HELD &&
                    scan->cll_state < CLS_FREEING &&
                    /*
                     * This check is racy as the lock can be cancelled right
                     * after it is done, but this is fine, because the page
                     * already exists.
                     */
                    (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
                    (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
                        /* Don't increase cs_hit here since this
                         * is just a helper function. */
                        cl_lock_get_trust(scan);
                        lock = scan;
                        break;
                }
        }
        spin_unlock(&head->coh_lock_guard);
        return lock;
}
EXPORT_SYMBOL(cl_lock_at_pgoff);
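
/*
 * Usage sketch (an illustrative addition): check whether page "index" of
 * "obj" is still protected by some granted lock, dropping the trusted
 * reference when done. This mirrors how check_and_discard_cb() below uses
 * the function.
 *
 *      struct cl_lock *lock;
 *
 *      lock = cl_lock_at_pgoff(env, obj, index, NULL, 1, 0);
 *      if (lock != NULL) {
 *              ... the page is covered ...
 *              cl_lock_put(env, lock);
 *      }
 */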

/**
 * Calculates the page offset at the layer of @lock.
 * At the time of this writing, @page is a top page and @lock is a sub-lock.
 */
static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
{
        struct lu_device_type *dtype;
        const struct cl_page_slice *slice;

        dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
        slice = cl_page_at(page, dtype);
        LASSERT(slice != NULL);
        return slice->cpl_page->cp_index;
}

/**
 * Checks whether @page is covered by another lock; if it is not, the page
 * is discarded.
 */
static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
                                struct cl_page *page, void *cbdata)
{
        struct cl_thread_info *info = cl_env_info(env);
        struct cl_lock *lock = cbdata;
        pgoff_t index = pgoff_at_lock(page, lock);

        if (index >= info->clt_fn_index) {
                struct cl_lock *tmp;

                /* refresh the first non-overlapped index */
                tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
                                       lock, 1, 0);
                if (tmp != NULL) {
                        /* Cache the first non-overlapped index so as to skip
                         * all pages within [index, clt_fn_index). This
                         * is safe because if tmp lock is cancelled, it will
                         * discard these pages. */
                        info->clt_fn_index = tmp->cll_descr.cld_end + 1;
                        if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
                                info->clt_fn_index = CL_PAGE_EOF;
                        cl_lock_put(env, tmp);
                } else if (cl_page_own(env, io, page) == 0) {
                        /* discard the page */
                        cl_page_unmap(env, io, page);
                        cl_page_discard(env, io, page);
                        cl_page_disown(env, io, page);
                } else {
                        LASSERT(page->cp_state == CPS_FREEING);
                }
        }

        info->clt_next_index = index + 1;
        return CLP_GANG_OKAY;
}

static int discard_cb(const struct lu_env *env, struct cl_io *io,
                      struct cl_page *page, void *cbdata)
{
        struct cl_thread_info *info = cl_env_info(env);
        struct cl_lock *lock = cbdata;

        LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
        KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
                      !PageWriteback(cl_page_vmpage(env, page))));
        KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
                      !PageDirty(cl_page_vmpage(env, page))));

        info->clt_next_index = pgoff_at_lock(page, lock) + 1;
        if (cl_page_own(env, io, page) == 0) {
                /* discard the page */
                cl_page_unmap(env, io, page);
                cl_page_discard(env, io, page);
                cl_page_disown(env, io, page);
        } else {
                LASSERT(page->cp_state == CPS_FREEING);
        }

        return CLP_GANG_OKAY;
}
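
/*
 * Minimal cl_page_gang_cb_t sketch (an illustrative addition, not part of
 * the original code): a callback that merely counts pages. Returning
 * CLP_GANG_OKAY keeps the radix-tree walk going; the other CLP_GANG_*
 * values are assumed to follow the protocol handled by
 * cl_lock_discard_pages() below.
 *
 *      static int example_count_cb(const struct lu_env *env, struct cl_io *io,
 *                                  struct cl_page *page, void *cbdata)
 *      {
 *              unsigned long *nr = cbdata;
 *
 *              (*nr)++;
 *              return CLP_GANG_OKAY;
 *      }
 */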

/**
 * Discards pages protected by the given lock. This function traverses the
 * radix tree to find all covering pages and discards them. If a page is
 * covered by another lock, it should remain in the cache.
 *
 * If an error happens at any step, the process continues anyway (the
 * reasoning behind this being that lock cancellation cannot be delayed
 * indefinitely).
 */
int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_thread_info *info  = cl_env_info(env);
        struct cl_io          *io    = &info->clt_io;
        struct cl_lock_descr  *descr = &lock->cll_descr;
        cl_page_gang_cb_t      cb;
        int res;
        int result;

        LINVRNT(cl_lock_invariant(env, lock));

        io->ci_obj = cl_object_top(descr->cld_obj);
        io->ci_ignore_layout = 1;
        result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (result != 0)
                GOTO(out, result);

        cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
        info->clt_fn_index = info->clt_next_index = descr->cld_start;
        do {
                res = cl_page_gang_lookup(env, descr->cld_obj, io,
                                          info->clt_next_index, descr->cld_end,
                                          cb, (void *)lock);
                if (info->clt_next_index > descr->cld_end)
                        break;

                if (res == CLP_GANG_RESCHED)
                        cond_resched();
        } while (res != CLP_GANG_OKAY);
out:
        cl_io_fini(env, io);
        return result;
}
EXPORT_SYMBOL(cl_lock_discard_pages);

/**
 * Eliminates all locks for a given object.
 *
 * Caller has to guarantee that no lock is in active use.
 *
 * \param cancel when this is set, cl_locks_prune() cancels locks before
 *               destroying them.
 */
void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
{
        struct cl_object_header *head;
        struct cl_lock          *lock;

        head = cl_object_header(obj);
        /*
         * If locks are destroyed without cancellation, all pages must be
         * already destroyed (as otherwise they will be left unprotected).
         */
        LASSERT(ergo(!cancel,
                     head->coh_tree.rnode == NULL && head->coh_pages == 0));

        spin_lock(&head->coh_lock_guard);
        while (!list_empty(&head->coh_locks)) {
                lock = container_of(head->coh_locks.next,
                                    struct cl_lock, cll_linkage);
                cl_lock_get_trust(lock);
                spin_unlock(&head->coh_lock_guard);
                lu_ref_add(&lock->cll_reference, "prune", current);

again:
                cl_lock_mutex_get(env, lock);
                if (lock->cll_state < CLS_FREEING) {
                        LASSERT(lock->cll_users <= 1);
                        if (unlikely(lock->cll_users == 1)) {
                                struct l_wait_info lwi = { 0 };

                                cl_lock_mutex_put(env, lock);
                                l_wait_event(lock->cll_wq,
                                             lock->cll_users == 0,
                                             &lwi);
                                goto again;
                        }

                        if (cancel)
                                cl_lock_cancel(env, lock);
                        cl_lock_delete(env, lock);
                }
                cl_lock_mutex_put(env, lock);
                lu_ref_del(&lock->cll_reference, "prune", current);
                cl_lock_put(env, lock);
                spin_lock(&head->coh_lock_guard);
        }
        spin_unlock(&head->coh_lock_guard);
}
EXPORT_SYMBOL(cl_locks_prune);
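
/*
 * Usage sketch (an illustrative addition): tearing down all locks of an
 * object, cancelling them first. Passing cancel == 0 is only legal when
 * every page of the object is already gone, as the assertion above
 * enforces.
 *
 *      cl_locks_prune(env, obj, 1);
 */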

static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
                                          const struct cl_io *io,
                                          const struct cl_lock_descr *need,
                                          const char *scope, const void *source)
{
        struct cl_lock *lock;

        while (1) {
                lock = cl_lock_find(env, io, need);
                if (IS_ERR(lock))
                        break;
                cl_lock_mutex_get(env, lock);
                if (lock->cll_state < CLS_FREEING &&
                    !(lock->cll_flags & CLF_CANCELLED)) {
                        cl_lock_hold_mod(env, lock, +1);
                        lu_ref_add(&lock->cll_holders, scope, source);
                        lu_ref_add(&lock->cll_reference, scope, source);
                        break;
                }
                cl_lock_mutex_put(env, lock);
                cl_lock_put(env, lock);
        }
        return lock;
}

/**
 * Returns a lock matching the \a need description, with a reference and a
 * hold on it.
 *
 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
 * guarantees that the lock is not in the CLS_FREEING state on return.
 */
struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
                             const struct cl_lock_descr *need,
                             const char *scope, const void *source)
{
        struct cl_lock *lock;

        lock = cl_lock_hold_mutex(env, io, need, scope, source);
        if (!IS_ERR(lock))
                cl_lock_mutex_put(env, lock);
        return lock;
}
EXPORT_SYMBOL(cl_lock_hold);

/**
 * Main high-level entry point of the cl_lock interface that finds an
 * existing lock, or enqueues a new lock, matching the given description.
 */
struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
                                const struct cl_lock_descr *need,
                                const char *scope, const void *source)
{
        struct cl_lock *lock;
        int             rc;
        __u32           enqflags = need->cld_enq_flags;

        do {
                lock = cl_lock_hold_mutex(env, io, need, scope, source);
                if (IS_ERR(lock))
                        break;

                rc = cl_enqueue_locked(env, lock, io, enqflags);
                if (rc == 0) {
                        if (cl_lock_fits_into(env, lock, need, io)) {
                                if (!(enqflags & CEF_AGL)) {
                                        cl_lock_mutex_put(env, lock);
                                        cl_lock_lockdep_acquire(env, lock,
                                                                enqflags);
                                        break;
                                }
                                rc = 1;
                        }
                        cl_unuse_locked(env, lock);
                }
                cl_lock_trace(D_DLMTRACE, env,
                              rc <= 0 ? "enqueue failed" : "agl succeeded",
                              lock);
                cl_lock_hold_release(env, lock, scope, source);
                cl_lock_mutex_put(env, lock);
                lu_ref_del(&lock->cll_reference, scope, source);
                cl_lock_put(env, lock);
                if (rc > 0) {
                        LASSERT(enqflags & CEF_AGL);
                        lock = NULL;
                } else if (rc != 0) {
                        lock = ERR_PTR(rc);
                }
        } while (rc == 0);
        return lock;
}
EXPORT_SYMBOL(cl_lock_request);
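
/*
 * Usage sketch (an illustrative addition, not part of the original code):
 * the common request/release pairing. "scope" and "source" only tag the
 * hold and reference for lu_ref debugging. With CEF_AGL set in
 * cld_enq_flags the function may return NULL instead of a lock, as the
 * code above shows.
 *
 *      struct cl_lock *lock;
 *
 *      lock = cl_lock_request(env, io, need, "example", current);
 *      if (IS_ERR(lock))
 *              return PTR_ERR(lock);
 *      if (lock != NULL) {
 *              ... do I/O under the lock ...
 *              cl_unuse(env, lock);
 *              cl_lock_release(env, lock, "example", current);
 *      }
 */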

/**
 * Adds a hold to a known lock.
 */
void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
                      const char *scope, const void *source)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_state != CLS_FREEING);

        cl_lock_hold_mod(env, lock, +1);
        cl_lock_get(lock);
        lu_ref_add(&lock->cll_holders, scope, source);
        lu_ref_add(&lock->cll_reference, scope, source);
}
EXPORT_SYMBOL(cl_lock_hold_add);

/**
 * Releases a hold and a reference on a lock on which the caller holds a
 * mutex.
 */
void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
                    const char *scope, const void *source)
{
        LINVRNT(cl_lock_invariant(env, lock));
        cl_lock_hold_release(env, lock, scope, source);
        lu_ref_del(&lock->cll_reference, scope, source);
        cl_lock_put(env, lock);
}
EXPORT_SYMBOL(cl_lock_unhold);

/**
 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
 */
void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
                     const char *scope, const void *source)
{
        LINVRNT(cl_lock_invariant(env, lock));
        cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
        cl_lock_mutex_get(env, lock);
        cl_lock_hold_release(env, lock, scope, source);
        cl_lock_mutex_put(env, lock);
        lu_ref_del(&lock->cll_reference, scope, source);
        cl_lock_put(env, lock);
}
EXPORT_SYMBOL(cl_lock_release);

void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        cl_lock_used_mod(env, lock, +1);
}
EXPORT_SYMBOL(cl_lock_user_add);

void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_users > 0);

        cl_lock_used_mod(env, lock, -1);
        if (lock->cll_users == 0)
                wake_up_all(&lock->cll_wq);
}
EXPORT_SYMBOL(cl_lock_user_del);
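
/*
 * Sketch (an illustrative addition): cl_lock_user_add() and
 * cl_lock_user_del() bracket a period during which an io actively uses the
 * lock; the final cl_lock_user_del() wakes waiters such as the
 * cll_users == 0 wait in cl_locks_prune() above.
 *
 *      cl_lock_mutex_get(env, lock);
 *      cl_lock_user_add(env, lock);
 *      cl_lock_mutex_put(env, lock);
 *      ... the lock is in active use ...
 *      cl_lock_mutex_get(env, lock);
 *      cl_lock_user_del(env, lock);
 *      cl_lock_mutex_put(env, lock);
 */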

const char *cl_lock_mode_name(const enum cl_lock_mode mode)
{
        static const char *names[] = {
                [CLM_PHANTOM] = "P",
                [CLM_READ]    = "R",
                [CLM_WRITE]   = "W",
                [CLM_GROUP]   = "G"
        };

        if (0 <= mode && mode < ARRAY_SIZE(names))
                return names[mode];
        else
                return "U";
}
EXPORT_SYMBOL(cl_lock_mode_name);

/**
 * Prints a human-readable representation of a lock description.
 */
void cl_lock_descr_print(const struct lu_env *env, void *cookie,
                         lu_printer_t printer,
                         const struct cl_lock_descr *descr)
{
        const struct lu_fid *fid;

        fid = lu_object_fid(&descr->cld_obj->co_lu);
        (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
}
EXPORT_SYMBOL(cl_lock_descr_print);

/**
 * Prints a human-readable representation of \a lock through \a printer.
 */
void cl_lock_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;

        (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
                   lock, atomic_read(&lock->cll_ref),
                   lock->cll_state, lock->cll_error, lock->cll_holds,
                   lock->cll_users, lock->cll_flags);
        cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
        (*printer)(env, cookie, " {\n");

        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                (*printer)(env, cookie, "    %s@%p: ",
                           slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
                           slice);
                if (slice->cls_ops->clo_print != NULL)
                        slice->cls_ops->clo_print(env, cookie, printer, slice);
                (*printer)(env, cookie, "\n");
        }
        (*printer)(env, cookie, "} lock@%p\n", lock);
}
EXPORT_SYMBOL(cl_lock_print);

int cl_lock_init(void)
{
        return lu_kmem_init(cl_lock_caches);
}

void cl_lock_fini(void)
{
        lu_kmem_fini(cl_lock_caches);
}