/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/upcall_cache.c
 *
 * Supplementary groups cache.
 */
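/*
 * Typical usage, sketched for illustration only: the caller code below
 * is a hypothetical consumer of this API, not an actual Lustre user of
 * the cache, and "/path/to/upcall" stands in for a real helper path.
 *
 *      cache = upcall_cache_init("identity", "/path/to/upcall", &my_ops);
 *      entry = upcall_cache_get_entry(cache, key, args);
 *      if (!IS_ERR(entry)) {
 *              ...use the data the upcall filled in...
 *              upcall_cache_put_entry(cache, entry);
 *      }
 *      ...
 *      the handler fed by the userspace helper then completes the entry:
 *      upcall_cache_downcall(cache, err, key, args);
 *      ...
 *      upcall_cache_cleanup(cache);
 */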
#define DEBUG_SUBSYSTEM S_SEC

#include <linux/libcfs/lucache.h>

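/*
 * Allocate an entry for @key in the NEW state, with a zero refcount,
 * initialized through the cache's ->init_entry() hook if one is set.
 */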
static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
                                              __u64 key, void *args)
{
        struct upcall_cache_entry *entry;

        LIBCFS_ALLOC(entry, sizeof(*entry));
        if (!entry)
                return NULL;

        UC_CACHE_SET_NEW(entry);
        INIT_LIST_HEAD(&entry->ue_hash);
        entry->ue_key = key;
        atomic_set(&entry->ue_refcount, 0);
        init_waitqueue_head(&entry->ue_waitq);
        if (cache->uc_ops->init_entry)
                cache->uc_ops->init_entry(entry, args);
        return entry;
}

/* protected by cache lock */
static void free_entry(struct upcall_cache *cache,
                       struct upcall_cache_entry *entry)
{
        if (cache->uc_ops->free_entry)
                cache->uc_ops->free_entry(cache, entry);

        list_del(&entry->ue_hash);
        CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
               entry, entry->ue_key);
        LIBCFS_FREE(entry, sizeof(*entry));
}

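/*
 * Match an entry against @key and, where provided, the per-cache
 * comparison hooks; both helpers return 0 on a match and nonzero
 * otherwise.
 */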
static inline int upcall_compare(struct upcall_cache *cache,
                                 struct upcall_cache_entry *entry,
                                 __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->upcall_compare)
                return cache->uc_ops->upcall_compare(cache, entry, key, args);

        return 0;
}

static inline int downcall_compare(struct upcall_cache *cache,
                                   struct upcall_cache_entry *entry,
                                   __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->downcall_compare)
                return cache->uc_ops->downcall_compare(cache, entry, key, args);

        return 0;
}

static inline void get_entry(struct upcall_cache_entry *entry)
{
        atomic_inc(&entry->ue_refcount);
}

static inline void put_entry(struct upcall_cache *cache,
                             struct upcall_cache_entry *entry)
{
        if (atomic_dec_and_test(&entry->ue_refcount) &&
            (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
                free_entry(cache, entry);
        }
}

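/*
 * Unlink @entry from its hash chain if it is no longer usable: expired
 * entries and timed-out acquisitions are marked EXPIRED and removed
 * (freed at once when unreferenced).  Returns 1 if the entry was
 * unlinked, 0 if it is still valid or still being acquired within its
 * timeout.
 */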
static int check_unlink_entry(struct upcall_cache *cache,
                              struct upcall_cache_entry *entry)
{
        if (UC_CACHE_IS_VALID(entry) &&
            cfs_time_before(cfs_time_current(), entry->ue_expire))
                return 0;

        if (UC_CACHE_IS_ACQUIRING(entry)) {
                if (entry->ue_acquire_expire == 0 ||
                    cfs_time_before(cfs_time_current(),
                                    entry->ue_acquire_expire))
                        return 0;

                UC_CACHE_SET_EXPIRED(entry);
                wake_up_all(&entry->ue_waitq);
        } else if (!UC_CACHE_IS_INVALID(entry)) {
                UC_CACHE_SET_EXPIRED(entry);
        }

        list_del_init(&entry->ue_hash);
        if (!atomic_read(&entry->ue_refcount))
                free_entry(cache, entry);
        return 1;
}

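/* Fire the upcall that will (asynchronously) fill in @entry. */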
static inline int refresh_entry(struct upcall_cache *cache,
                                struct upcall_cache_entry *entry)
{
        LASSERT(cache->uc_ops->do_upcall);
        return cache->uc_ops->do_upcall(cache, entry);
}

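/*
 * Look up the entry for @key, creating it if need be, and return it with
 * a reference held:
 *
 * - a NEW entry fires the upcall and is marked ACQUIRING;
 * - while an entry is ACQUIRING, callers sleep on ue_waitq until the
 *   downcall completes it or the acquire timeout expires;
 * - an INVALID entry yields -EIDRM, an interrupted or timed-out wait
 *   yields -EINTR or -ETIMEDOUT, and an expired entry (unless it is the
 *   one we just created) is retried from scratch.
 */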
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
                                                  __u64 key, void *args)
{
        struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
        struct list_head *head;
        wait_queue_t wait;
        int rc, found;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
        found = 0;
        spin_lock(&cache->uc_lock);
        list_for_each_entry_safe(entry, next, head, ue_hash) {
                /* check invalid & expired items */
                if (check_unlink_entry(cache, entry))
                        continue;
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (!found) {
                if (!new) {
                        spin_unlock(&cache->uc_lock);
                        new = alloc_entry(cache, key, args);
                        if (!new) {
                                CERROR("failed to alloc entry\n");
                                return ERR_PTR(-ENOMEM);
                        }
                        goto find_again;
                } else {
                        list_add(&new->ue_hash, head);
                        entry = new;
                }
        } else {
                if (new) {
                        free_entry(cache, new);
                        new = NULL;
                }
                list_move(&entry->ue_hash, head);
        }
        get_entry(entry);

        /* acquire for new one */
        if (UC_CACHE_IS_NEW(entry)) {
                UC_CACHE_SET_ACQUIRING(entry);
                UC_CACHE_CLEAR_NEW(entry);
                spin_unlock(&cache->uc_lock);
                rc = refresh_entry(cache, entry);
                spin_lock(&cache->uc_lock);
                entry->ue_acquire_expire =
                        cfs_time_shift(cache->uc_acquire_expire);
                if (rc < 0) {
                        UC_CACHE_CLEAR_ACQUIRING(entry);
                        UC_CACHE_SET_INVALID(entry);
                        wake_up_all(&entry->ue_waitq);
                        if (unlikely(rc == -EREMCHG)) {
                                put_entry(cache, entry);
                                GOTO(out, entry = ERR_PTR(rc));
                        }
                }
        }
        /* someone (and only one) is doing the upcall for this item;
         * wait for it to complete */
        if (UC_CACHE_IS_ACQUIRING(entry)) {
                long expiry = (entry == new) ?
                              cfs_time_seconds(cache->uc_acquire_expire) :
                              MAX_SCHEDULE_TIMEOUT;
                long left;

                init_waitqueue_entry_current(&wait);
                add_wait_queue(&entry->ue_waitq, &wait);
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock(&cache->uc_lock);

                left = waitq_timedwait(&wait, TASK_INTERRUPTIBLE, expiry);

                spin_lock(&cache->uc_lock);
                remove_wait_queue(&entry->ue_waitq, &wait);
                if (UC_CACHE_IS_ACQUIRING(entry)) {
                        /* we were interrupted or the upcall failed midway */
                        rc = left > 0 ? -EINTR : -ETIMEDOUT;
                        CERROR("acquire for key "LPU64": error %d\n",
                               entry->ue_key, rc);
                        put_entry(cache, entry);
                        GOTO(out, entry = ERR_PTR(rc));
                }
        }

        /* invalid means error, no need to try again */
        if (UC_CACHE_IS_INVALID(entry)) {
                put_entry(cache, entry);
                GOTO(out, entry = ERR_PTR(-EIDRM));
        }

        /* check for expiry
         * We can't refresh the existing entry in place because its
         * memory might be shared by multiple processes.
         */
        if (check_unlink_entry(cache, entry)) {
                /* If expired, try again.  But if this entry was created
                 * by us and expired too quickly without any error, give
                 * the caller at least one chance to use it.
                 */
                if (entry != new) {
                        put_entry(cache, entry);
                        spin_unlock(&cache->uc_lock);
                        new = NULL;
                        goto find_again;
                }
        }

        /* Now we know it's good */
out:
        spin_unlock(&cache->uc_lock);
        return entry;
}
EXPORT_SYMBOL(upcall_cache_get_entry);

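/*
 * Release a reference taken by upcall_cache_get_entry().  The entry is
 * freed on the final put once it is no longer live (INVALID or EXPIRED).
 */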
void upcall_cache_put_entry(struct upcall_cache *cache,
                            struct upcall_cache_entry *entry)
{
        if (!entry)
                return;

        LASSERT(atomic_read(&entry->ue_refcount) > 0);
        spin_lock(&cache->uc_lock);
        put_entry(cache, entry);
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_put_entry);

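/*
 * Deliver the result of an upcall for @key back into the cache.  This is
 * expected to be called from the kernel handler (e.g. an ioctl) that the
 * userspace helper reports through; the exact entry point is up to the
 * cache user.  On success the matching ACQUIRING entry is parsed via
 * ->parse_downcall() and marked VALID; waiters on ue_waitq are woken in
 * every case.  An unexpected, stale, or failed (@err != 0) downcall
 * invalidates the entry and returns -EINVAL.
 */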
int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                          void *args)
{
        struct upcall_cache_entry *entry = NULL;
        struct list_head *head;
        int found = 0, rc = 0;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (downcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        get_entry(entry);
                        break;
                }
        }

        if (!found) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
                       cache->uc_name, key);
                /* not found; this can happen legitimately */
                spin_unlock(&cache->uc_lock);
                return -EINVAL;
        }

        if (err) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" returned %d\n",
                       cache->uc_name, entry->ue_key, err);
                GOTO(out, rc = -EINVAL);
        }

        if (!UC_CACHE_IS_ACQUIRING(entry)) {
                CDEBUG(D_RPCTRACE, "%s: found uptodate entry %p (key "LPU64")\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = 0);
        }

        if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
                CERROR("%s: found a stale entry %p (key "LPU64") in ioctl\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = -EINVAL);
        }

        spin_unlock(&cache->uc_lock);
        if (cache->uc_ops->parse_downcall)
                rc = cache->uc_ops->parse_downcall(cache, entry, args);
        spin_lock(&cache->uc_lock);
        if (rc)
                GOTO(out, rc);

        entry->ue_expire = cfs_time_shift(cache->uc_entry_expire);
        UC_CACHE_SET_VALID(entry);
        CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n",
               cache->uc_name, entry, entry->ue_key);
out:
        if (rc) {
                UC_CACHE_SET_INVALID(entry);
                list_del_init(&entry->ue_hash);
        }
        UC_CACHE_CLEAR_ACQUIRING(entry);
        spin_unlock(&cache->uc_lock);
        wake_up_all(&entry->ue_waitq);
        put_entry(cache, entry);

        return rc;
}
EXPORT_SYMBOL(upcall_cache_downcall);

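/*
 * Flush helper: walk every hash bucket and free entries.  With @force
 * unset, entries that are still referenced are only marked EXPIRED, so
 * they are freed by the final put_entry().
 */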
static void cache_flush(struct upcall_cache *cache, int force)
{
        struct upcall_cache_entry *entry, *next;
        int i;

        spin_lock(&cache->uc_lock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
                list_for_each_entry_safe(entry, next,
                                         &cache->uc_hashtable[i], ue_hash) {
                        if (!force && atomic_read(&entry->ue_refcount)) {
                                UC_CACHE_SET_EXPIRED(entry);
                                continue;
                        }
                        LASSERT(!atomic_read(&entry->ue_refcount));
                        free_entry(cache, entry);
                }
        }
        spin_unlock(&cache->uc_lock);
}

void upcall_cache_flush_idle(struct upcall_cache *cache)
{
        cache_flush(cache, 0);
}
EXPORT_SYMBOL(upcall_cache_flush_idle);

void upcall_cache_flush_all(struct upcall_cache *cache)
{
        cache_flush(cache, 1);
}
EXPORT_SYMBOL(upcall_cache_flush_all);

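/* Expire the entry matching @key (and free it at once if unreferenced). */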
void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
        struct list_head *head;
        struct upcall_cache_entry *entry;
        int found = 0;

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, cur %lu, ex %ld/%ld\n",
                      cache->uc_name, entry, entry->ue_key,
                      atomic_read(&entry->ue_refcount), entry->ue_flags,
                      cfs_time_current_sec(), entry->ue_acquire_expire,
                      entry->ue_expire);
                UC_CACHE_SET_EXPIRED(entry);
                if (!atomic_read(&entry->ue_refcount))
                        free_entry(cache, entry);
        }
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);

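/*
 * Create a cache named @name whose misses are filled in by the userspace
 * helper at @upcall, with per-cache behaviour supplied through @ops.
 * Defaults: valid entries expire after 20 minutes, outstanding upcalls
 * after 30 seconds.
 */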
struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
                                       struct upcall_cache_ops *ops)
{
        struct upcall_cache *cache;
        int i;

        LIBCFS_ALLOC(cache, sizeof(*cache));
        if (!cache)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&cache->uc_lock);
        rwlock_init(&cache->uc_upcall_rwlock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
                INIT_LIST_HEAD(&cache->uc_hashtable[i]);
        strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
        /* upcall pathname; tunable via proc */
        strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
        cache->uc_entry_expire = 20 * 60;       /* 20 minutes, in seconds */
        cache->uc_acquire_expire = 30;          /* seconds */
        cache->uc_ops = ops;

        return cache;
}
EXPORT_SYMBOL(upcall_cache_init);

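/* Flush all entries and free the cache itself; a NULL cache is a no-op. */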
void upcall_cache_cleanup(struct upcall_cache *cache)
{
        if (!cache)
                return;
        upcall_cache_flush_all(cache);
        LIBCFS_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(upcall_cache_cleanup);