/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/hash.c
 *
 * Implement a hash class for the hashing needs of the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - move all stuff to libcfs
 * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
 * - ignore hs_rwlock if without CFS_HASH_REHASH setting
 * - buckets are allocated one by one (instead of as contiguous memory),
 *   to avoid unnecessary cacheline conflicts
 *
 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
 * - a "bucket" is now a group of hlist_heads; the user can specify the
 *   bucket size via bkt_bits of cfs_hash_create(), and all hlist_heads
 *   in a bucket share one lock to reduce memory overhead.
 *
 * - support lockless hash, where the caller takes care of locks:
 *   avoids lock overhead for hash tables that are already protected
 *   by locking in the caller for another reason
 *
 * - support both spin_lock/rwlock for buckets:
 *   the overhead of spinlock contention is lower than the read/write
 *   contention of an rwlock, so using a spinlock to serialize operations
 *   on a bucket is more reasonable for frequently changed hash tables
 *
 * - support single-lock mode:
 *   one lock to protect all hash operations, to avoid the overhead of
 *   multiple locks if the hash table is always small
 *
 * - removed a lot of unnecessary addref & decref on hash elements:
 *   addref & decref are atomic operations, which are expensive in many
 *   use-cases.
 *
 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
 *   some lustre use-cases require these functions to be strictly
 *   non-blocking, so in those cases we schedule any required rehash
 *   on a different thread.
 *
 * - safer rehash on large hash tables:
 *   In the old implementation, the rehash function would exclusively
 *   lock the hash table and finish the rehash in one batch; that is
 *   dangerous on an SMP system because rehashing millions of elements
 *   can take a long time.  The new rehash implementation can release
 *   the lock and relax the CPU in the middle of a rehash, so it is safe
 *   for another thread to search/change the hash table even while it
 *   is rehashing.
 *
 * - support two different refcount modes:
 *   . the hash table holds a refcount on each element
 *   . the hash table doesn't change refcounts on adding/removing elements
 *
 * - support long-name hash tables (for param-tree)
 *
 * - fix a bug in cfs_hash_rehash_key:
 *   in the old implementation, cfs_hash_rehash_key could corrupt the
 *   hash table because @key was overwritten without any protection.
 *   Now the user must define hs_keycpy for rehash-enabled hash tables,
 *   and cfs_hash_rehash_key overwrites the hash key inside the lock by
 *   calling hs_keycpy.
 *
 * - better hash iteration:
 *   Now we support both locked iteration & lockless iteration of the
 *   hash table. Also, the user can break the iteration by returning 1
 *   from the callback.
 */

#include <linux/libcfs/libcfs.h>
#include <linux/seq_file.h>

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
                "warning when hash depth is high.");
#endif

struct cfs_wi_sched *cfs_sched_rehash;

static inline void
cfs_hash_nl_lock(cfs_hash_lock_t *lock, int exclusive) {}

static inline void
cfs_hash_nl_unlock(cfs_hash_lock_t *lock, int exclusive) {}

static inline void
cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
{
        spin_lock(&lock->spin);
}

static inline void
cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
{
        spin_unlock(&lock->spin);
}

static inline void
cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
{
        if (!exclusive)
                read_lock(&lock->rw);
        else
                write_lock(&lock->rw);
}

static inline void
cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
{
        if (!exclusive)
                read_unlock(&lock->rw);
        else
                write_unlock(&lock->rw);
}

/** No lock hash */
static cfs_hash_lock_ops_t cfs_hash_nl_lops =
{
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** no bucket lock, one spinlock to protect everything */
static cfs_hash_lock_ops_t cfs_hash_nbl_lops =
{
        .hs_lock        = cfs_hash_spin_lock,
        .hs_unlock      = cfs_hash_spin_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** spin bucket lock, rehash is enabled */
static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops =
{
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is enabled */
static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops =
{
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

/** spin bucket lock, rehash is disabled */
static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops =
{
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is disabled */
static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops =
{
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

static void
cfs_hash_lock_setup(cfs_hash_t *hs)
{
        if (cfs_hash_with_no_lock(hs)) {
                hs->hs_lops = &cfs_hash_nl_lops;

        } else if (cfs_hash_with_no_bktlock(hs)) {
                hs->hs_lops = &cfs_hash_nbl_lops;
                spin_lock_init(&hs->hs_lock.spin);

        } else if (cfs_hash_with_rehash(hs)) {
                rwlock_init(&hs->hs_lock.rw);

                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_spin_lops;
                else
                        LBUG();
        } else {
                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
                else
                        LBUG();
        }
}
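
/*
 * Editorial summary (not part of the original file): how the flag
 * combinations above select a lock-ops table.  The flag names follow the
 * cfs_hash_with_*() predicates used in cfs_hash_lock_setup():
 *
 *   CFS_HASH_NO_LOCK                        -> cfs_hash_nl_lops
 *      (no table lock, no bucket lock; the caller serializes everything)
 *   CFS_HASH_NO_BKTLOCK                     -> cfs_hash_nbl_lops
 *      (one top-level spinlock, buckets unlocked)
 *   CFS_HASH_REHASH + CFS_HASH_RW_BKTLOCK   -> cfs_hash_bkt_rw_lops
 *   CFS_HASH_REHASH + CFS_HASH_SPIN_BKTLOCK -> cfs_hash_bkt_spin_lops
 *      (table-level rwlock, taken shared for normal operations and
 *       exclusive for rehash; plus a per-bucket rwlock/spinlock)
 *   no rehash + CFS_HASH_RW_BKTLOCK         -> cfs_hash_nr_bkt_rw_lops
 *   no rehash + CFS_HASH_SPIN_BKTLOCK       -> cfs_hash_nr_bkt_spin_lops
 *      (table lock is a no-op; only bucket locks are taken)
 */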

/**
 * Simple hash head without depth tracking;
 * a new element is always added to the head of the hlist.
 */
typedef struct {
        struct hlist_head       hh_head;        /**< entries list */
} cfs_hash_head_t;

static int
cfs_hash_hh_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_head_t);
}

static struct hlist_head *
cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];

        return &head[bd->bd_offset].hh_head;
}

static int
cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
        return -1; /* unknown depth */
}

static int
cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        hlist_del_init(hnode);
        return -1; /* unknown depth */
}

/**
 * Simple hash head with depth tracking;
 * a new element is always added to the head of the hlist.
 */
typedef struct {
        struct hlist_head       hd_head;        /**< entries list */
        unsigned int            hd_depth;       /**< list length */
} cfs_hash_head_dep_t;

static int
cfs_hash_hd_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_head_dep_t);
}

static struct hlist_head *
cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_head_dep_t   *head;

        head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hd_head;
}

static int
cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
                                               cfs_hash_head_dep_t, hd_head);
        hlist_add_head(hnode, &hh->hd_head);
        return ++hh->hd_depth;
}

static int
cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
                                               cfs_hash_head_dep_t, hd_head);
        hlist_del_init(hnode);
        return --hh->hd_depth;
}

/**
 * Doubly linked hash head without depth tracking;
 * a new element is always added to the tail of the hlist.
 */
typedef struct {
        struct hlist_head       dh_head;        /**< entries list */
        struct hlist_node       *dh_tail;       /**< the last entry */
} cfs_hash_dhead_t;

static int
cfs_hash_dh_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_dhead_t);
}

static struct hlist_head *
cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_dhead_t *head;

        head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dh_head;
}

static int
cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
                                            cfs_hash_dhead_t, dh_head);

        if (dh->dh_tail != NULL) /* not empty */
                hlist_add_after(dh->dh_tail, hnode);
        else /* empty list */
                hlist_add_head(hnode, &dh->dh_head);
        dh->dh_tail = hnode;
        return -1; /* unknown depth */
}

static int
cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnd)
{
        cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
                                            cfs_hash_dhead_t, dh_head);

        /* when deleting the tail, the new tail is the previous node,
         * recovered from ::pprev via container_of; NULL if the list
         * becomes empty */
        if (hnd->next == NULL) { /* it's the tail */
                dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return -1; /* unknown depth */
}

/**
 * Doubly linked hash head with depth tracking;
 * a new element is always added to the tail of the hlist.
 */
typedef struct {
        struct hlist_head       dd_head;        /**< entries list */
        struct hlist_node       *dd_tail;       /**< the last entry */
        unsigned int            dd_depth;       /**< list length */
} cfs_hash_dhead_dep_t;

static int
cfs_hash_dd_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_dhead_dep_t);
}

static struct hlist_head *
cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_dhead_dep_t *head;

        head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dd_head;
}

static int
cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
                                                cfs_hash_dhead_dep_t, dd_head);

        if (dh->dd_tail != NULL) /* not empty */
                hlist_add_after(dh->dd_tail, hnode);
        else /* empty list */
                hlist_add_head(hnode, &dh->dd_head);
        dh->dd_tail = hnode;
        return ++dh->dd_depth;
}

static int
cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnd)
{
        cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
                                                cfs_hash_dhead_dep_t, dd_head);

        if (hnd->next == NULL) { /* it's the tail */
                dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return --dh->dd_depth;
}

static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
        .hop_hhead      = cfs_hash_hh_hhead,
        .hop_hhead_size = cfs_hash_hh_hhead_size,
        .hop_hnode_add  = cfs_hash_hh_hnode_add,
        .hop_hnode_del  = cfs_hash_hh_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
        .hop_hhead      = cfs_hash_hd_hhead,
        .hop_hhead_size = cfs_hash_hd_hhead_size,
        .hop_hnode_add  = cfs_hash_hd_hnode_add,
        .hop_hnode_del  = cfs_hash_hd_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
        .hop_hhead      = cfs_hash_dh_hhead,
        .hop_hhead_size = cfs_hash_dh_hhead_size,
        .hop_hnode_add  = cfs_hash_dh_hnode_add,
        .hop_hnode_del  = cfs_hash_dh_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
        .hop_hhead      = cfs_hash_dd_hhead,
        .hop_hhead_size = cfs_hash_dd_hhead_size,
        .hop_hnode_add  = cfs_hash_dd_hnode_add,
        .hop_hnode_del  = cfs_hash_dd_hnode_del,
};

static void
cfs_hash_hlist_setup(cfs_hash_t *hs)
{
        if (cfs_hash_with_add_tail(hs)) {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_dd_hops : &cfs_hash_dh_hops;
        } else {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_hd_hops : &cfs_hash_hh_hops;
        }
}

static void
cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
                     unsigned int bits, const void *key, cfs_hash_bd_t *bd)
{
        unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);

        LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);

        /* the low bits of the hashed index select the bucket,
         * the high bits select the hlist head inside the bucket */
        bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
        bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
}

void
cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (likely(hs->hs_rehash_buckets == NULL)) {
                cfs_hash_bd_from_key(hs, hs->hs_buckets,
                                     hs->hs_cur_bits, key, bd);
        } else {
                LASSERT(hs->hs_rehash_bits != 0);
                cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                                     hs->hs_rehash_bits, key, bd);
        }
}
EXPORT_SYMBOL(cfs_hash_bd_get);
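
/*
 * Worked example (editorial note, values invented for illustration):
 * with bits = 10 and hs_bkt_bits = 3, the table has 2^10 = 1024 hash
 * heads spread over 2^(10-3) = 128 buckets of 2^3 = 8 heads each.
 * For a hashed index of, say, 0x2a5 (binary 10 1010 0101):
 *
 *      bd_bucket = bkts[0x2a5 & 0x7f] = bkts[0x25]   (low 7 bits)
 *      bd_offset = 0x2a5 >> 7         = 5            (high 3 bits)
 *
 * so the entry lives on the 6th hlist head of bucket 0x25.  Using the
 * low bits for the bucket sends neighbouring indices to different
 * buckets, which spreads lock contention.
 */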

static inline void
cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
{
        if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
                return;

        bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
        if (likely(warn_on_depth == 0 ||
                   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
                return;

        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_max  = dep_cur;
        hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
        hs->hs_dep_off  = bd->bd_offset;
        hs->hs_dep_bits = hs->hs_cur_bits;
        spin_unlock(&hs->hs_dep_lock);

        cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
# endif
}

void
cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                       struct hlist_node *hnode)
{
        int             rc;

        rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
        cfs_hash_bd_dep_record(hs, bd, rc);
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;
        bd->bd_bucket->hsb_count++;

        if (cfs_hash_with_counter(hs))
                atomic_inc(&hs->hs_count);
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_get(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_add_locked);

void
cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                       struct hlist_node *hnode)
{
        hs->hs_hops->hop_hnode_del(hs, bd, hnode);

        LASSERT(bd->bd_bucket->hsb_count > 0);
        bd->bd_bucket->hsb_count--;
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;

        if (cfs_hash_with_counter(hs)) {
                LASSERT(atomic_read(&hs->hs_count) > 0);
                atomic_dec(&hs->hs_count);
        }
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_put_locked(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_del_locked);

void
cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
                        cfs_hash_bd_t *bd_new, struct hlist_node *hnode)
{
        cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
        cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
        int             rc;

        if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
                return;

        /* use hop_hnode_add/del directly, to avoid the atomic & refcount
         * ops in cfs_hash_bd_del/add_locked */
        hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
        rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
        cfs_hash_bd_dep_record(hs, bd_new, rc);

        LASSERT(obkt->hsb_count > 0);
        obkt->hsb_count--;
        obkt->hsb_version++;
        if (unlikely(obkt->hsb_version == 0))
                obkt->hsb_version++;
        nbkt->hsb_count++;
        nbkt->hsb_version++;
        if (unlikely(nbkt->hsb_version == 0))
                nbkt->hsb_version++;
}
EXPORT_SYMBOL(cfs_hash_bd_move_locked);

enum {
        /** always set, for sanity (avoid ZERO intent) */
        CFS_HS_LOOKUP_MASK_FIND     = 1 << 0,
        /** return entry with a ref */
        CFS_HS_LOOKUP_MASK_REF      = 1 << 1,
        /** add entry if not existing */
        CFS_HS_LOOKUP_MASK_ADD      = 1 << 2,
        /** delete entry, ignore other masks */
        CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
};

typedef enum cfs_hash_lookup_intent {
        /** return item w/o refcount */
        CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
        /** return item with refcount */
        CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_REF),
        /** return item w/o refcount if existed, otherwise add */
        CFS_HS_LOOKUP_IT_ADD        = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** return item with refcount if existed, otherwise add */
        CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** delete if existed */
        CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_DEL)
} cfs_hash_lookup_intent_t;

static struct hlist_node *
cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                          const void *key, struct hlist_node *hnode,
                          cfs_hash_lookup_intent_t intent)
{
        struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
        struct hlist_node  *ehnode;
        struct hlist_node  *match;
        int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;

        /* with this function, we can avoid a lot of useless refcount ops,
         * which are expensive atomic operations most of the time. */
        match = intent_add ? NULL : hnode;
        hlist_for_each(ehnode, hhead) {
                if (!cfs_hash_keycmp(hs, key, ehnode))
                        continue;

                if (match != NULL && match != ehnode) /* can't match */
                        continue;

                /* match and ... */
                if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
                        cfs_hash_bd_del_locked(hs, bd, ehnode);
                        return ehnode;
                }

                /* caller wants refcount? */
                if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
                        cfs_hash_get(hs, ehnode);
                return ehnode;
        }
        /* no matching item */
        if (!intent_add)
                return NULL;

        LASSERT(hnode != NULL);
        cfs_hash_bd_add_locked(hs, bd, hnode);
        return hnode;
}

struct hlist_node *
cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                         CFS_HS_LOOKUP_IT_FIND);
}
EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);

struct hlist_node *
cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                         CFS_HS_LOOKUP_IT_PEEK);
}
EXPORT_SYMBOL(cfs_hash_bd_peek_locked);

struct hlist_node *
cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                           const void *key, struct hlist_node *hnode,
                           int noref)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
                                         CFS_HS_LOOKUP_IT_ADD |
                                         (!noref * CFS_HS_LOOKUP_MASK_REF));
}
EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);

struct hlist_node *
cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                           const void *key, struct hlist_node *hnode)
{
        /* hnode can be NULL, we find the first item with @key */
        return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
                                         CFS_HS_LOOKUP_IT_FINDDEL);
}
EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);

static void
cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                       unsigned n, int excl)
{
        cfs_hash_bucket_t *prev = NULL;
        int             i;

        /**
         * bds must be ordered by ascending bd->bd_bucket->hsb_index.
         * NB: it's possible that several bds point to the same bucket but
         * have different bd::bd_offset, so we must take care to lock each
         * bucket only once to avoid deadlock.
         */
        cfs_hash_for_each_bd(bds, n, i) {
                if (prev == bds[i].bd_bucket)
                        continue;

                LASSERT(prev == NULL ||
                        prev->hsb_index < bds[i].bd_bucket->hsb_index);
                cfs_hash_bd_lock(hs, &bds[i], excl);
                prev = bds[i].bd_bucket;
        }
}

static void
cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                         unsigned n, int excl)
{
        cfs_hash_bucket_t *prev = NULL;
        int             i;

        cfs_hash_for_each_bd(bds, n, i) {
                if (prev != bds[i].bd_bucket) {
                        cfs_hash_bd_unlock(hs, &bds[i], excl);
                        prev = bds[i].bd_bucket;
                }
        }
}

static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                unsigned n, const void *key)
{
        struct hlist_node  *ehnode;
        unsigned           i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
                                                   CFS_HS_LOOKUP_IT_FIND);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
                                 cfs_hash_bd_t *bds, unsigned n, const void *key,
                                 struct hlist_node *hnode, int noref)
{
        struct hlist_node  *ehnode;
        int             intent;
        unsigned           i;

        LASSERT(hnode != NULL);
        intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
                                                   NULL, intent);
                if (ehnode != NULL)
                        return ehnode;
        }

        if (i == 1) { /* only one bucket */
                cfs_hash_bd_add_locked(hs, &bds[0], hnode);
        } else {
                cfs_hash_bd_t      mybd;

                cfs_hash_bd_get(hs, key, &mybd);
                cfs_hash_bd_add_locked(hs, &mybd, hnode);
        }

        return hnode;
}

static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode)
{
        struct hlist_node  *ehnode;
        unsigned           i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
                                                   CFS_HS_LOOKUP_IT_FINDDEL);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static void
cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
{
        int     rc;

        if (bd2->bd_bucket == NULL)
                return;

        if (bd1->bd_bucket == NULL) {
                *bd1 = *bd2;
                bd2->bd_bucket = NULL;
                return;
        }

        rc = cfs_hash_bd_compare(bd1, bd2);
        if (rc == 0) {
                bd2->bd_bucket = NULL;

        } else if (rc > 0) { /* swap bd1 and bd2 */
                cfs_hash_bd_t tmp;

                tmp = *bd2;
                *bd2 = *bd1;
                *bd1 = tmp;
        }
}

void
cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
{
        /* NB: caller should hold hs_lock.rw if REHASH is set */
        cfs_hash_bd_from_key(hs, hs->hs_buckets,
                             hs->hs_cur_bits, key, &bds[0]);
        if (likely(hs->hs_rehash_buckets == NULL)) {
                /* no rehash or not rehashing */
                bds[1].bd_bucket = NULL;
                return;
        }

        LASSERT(hs->hs_rehash_bits != 0);
        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                             hs->hs_rehash_bits, key, &bds[1]);

        cfs_hash_bd_order(&bds[0], &bds[1]);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_get);

void
cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
{
        cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_lock);

void
cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
{
        cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);

struct hlist_node *
cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                               const void *key)
{
        return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);

struct hlist_node *
cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                const void *key, struct hlist_node *hnode,
                                int noref)
{
        return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
                                                hnode, noref);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);

struct hlist_node *
cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                const void *key, struct hlist_node *hnode)
{
        return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);

static void
cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
                      int bkt_size, int prev_size, int size)
{
        int     i;

        for (i = prev_size; i < size; i++) {
                if (buckets[i] != NULL)
                        LIBCFS_FREE(buckets[i], bkt_size);
        }

        LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
}

/*
 * Create or grow bucket memory. Return old_buckets if no allocation was
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static cfs_hash_bucket_t **
cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
                         unsigned int old_size, unsigned int new_size)
{
        cfs_hash_bucket_t **new_bkts;
        int              i;

        LASSERT(old_size == 0 || old_bkts != NULL);

        if (old_bkts != NULL && old_size == new_size)
                return old_bkts;

        LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
        if (new_bkts == NULL)
                return NULL;

        if (old_bkts != NULL) {
                memcpy(new_bkts, old_bkts,
                       min(old_size, new_size) * sizeof(*old_bkts));
        }

        for (i = old_size; i < new_size; i++) {
                struct hlist_head *hhead;
                cfs_hash_bd_t     bd;

                LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
                if (new_bkts[i] == NULL) {
                        cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
                                              old_size, new_size);
                        return NULL;
                }

                new_bkts[i]->hsb_index   = i;
                new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
                new_bkts[i]->hsb_depmax  = -1; /* unknown */
                bd.bd_bucket = new_bkts[i];
                cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
                        INIT_HLIST_HEAD(hhead);

                if (cfs_hash_with_no_lock(hs) ||
                    cfs_hash_with_no_bktlock(hs))
                        continue;

                if (cfs_hash_with_rw_bktlock(hs))
                        rwlock_init(&new_bkts[i]->hsb_lock.rw);
                else if (cfs_hash_with_spin_bktlock(hs))
                        spin_lock_init(&new_bkts[i]->hsb_lock.spin);
                else
                        LBUG(); /* invalid use-case */
        }
        return new_bkts;
}

/**
 * Initialize a new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
 *           - CFS_HASH_SORT enable chained hash sort
 */
static int cfs_hash_rehash_worker(cfs_workitem_t *wi);

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(cfs_workitem_t *wi)
{
        cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
        int      dep;
        int      bkt;
        int      off;
        int      bits;

        spin_lock(&hs->hs_dep_lock);
        dep  = hs->hs_dep_max;
        bkt  = hs->hs_dep_bkt;
        off  = hs->hs_dep_off;
        bits = hs->hs_dep_bits;
        spin_unlock(&hs->hs_dep_lock);

        LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
                      hs->hs_name, bits, dep, bkt, off);
        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_bits = 0; /* mark workitem as done */
        spin_unlock(&hs->hs_dep_lock);
        return 0;
}

static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
{
        spin_lock_init(&hs->hs_dep_lock);
        cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
}

static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
{
        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
                return;

        spin_lock(&hs->hs_dep_lock);
        while (hs->hs_dep_bits != 0) {
                spin_unlock(&hs->hs_dep_lock);
                cond_resched();
                spin_lock(&hs->hs_dep_lock);
        }
        spin_unlock(&hs->hs_dep_lock);
}

#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */

static inline void cfs_hash_depth_wi_init(cfs_hash_t *hs) {}
static inline void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) {}

#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */

cfs_hash_t *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                unsigned bkt_bits, unsigned extra_bytes,
                unsigned min_theta, unsigned max_theta,
                cfs_hash_ops_t *ops, unsigned flags)
{
        cfs_hash_t *hs;
        int      len;

        CLASSERT(CFS_HASH_THETA_BITS < 15);

        LASSERT(name != NULL);
        LASSERT(ops != NULL);
        LASSERT(ops->hs_key);
        LASSERT(ops->hs_hash);
        LASSERT(ops->hs_object);
        LASSERT(ops->hs_keycmp);
        LASSERT(ops->hs_get != NULL);
        LASSERT(ops->hs_put_locked != NULL);

        if ((flags & CFS_HASH_REHASH) != 0)
                flags |= CFS_HASH_COUNTER; /* must have counter */

        LASSERT(cur_bits > 0);
        LASSERT(cur_bits >= bkt_bits);
        LASSERT(max_bits >= cur_bits && max_bits < 31);
        LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
        LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
                     (flags & CFS_HASH_NO_LOCK) == 0));
        LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
                     ops->hs_keycpy != NULL));

        len = (flags & CFS_HASH_BIGNAME) == 0 ?
              CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
        LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
        if (hs == NULL)
                return NULL;

        strncpy(hs->hs_name, name, len);
        hs->hs_name[len - 1] = '\0';
        hs->hs_flags = flags;

        atomic_set(&hs->hs_refcount, 1);
        atomic_set(&hs->hs_count, 0);

        cfs_hash_lock_setup(hs);
        cfs_hash_hlist_setup(hs);

        hs->hs_cur_bits = (__u8)cur_bits;
        hs->hs_min_bits = (__u8)cur_bits;
        hs->hs_max_bits = (__u8)max_bits;
        hs->hs_bkt_bits = (__u8)bkt_bits;

        hs->hs_ops         = ops;
        hs->hs_extra_bytes = extra_bytes;
        hs->hs_rehash_bits = 0;
        cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
        cfs_hash_depth_wi_init(hs);

        if (cfs_hash_with_rehash(hs))
                __cfs_hash_set_theta(hs, min_theta, max_theta);

        hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
                                                  CFS_HASH_NBKT(hs));
        if (hs->hs_buckets != NULL)
                return hs;

        LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
        return NULL;
}
EXPORT_SYMBOL(cfs_hash_create);
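
/*
 * Usage sketch (editorial note, not part of the original file): a caller
 * embeds a struct hlist_node in its object, registers a cfs_hash_ops_t
 * and creates the table.  The ops signatures follow those implied by the
 * call sites in this file; "my_obj", "my_ops" and the numeric parameters
 * are invented for illustration, and CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA
 * and CFS_HASH_DEFAULT are assumed to come from libcfs_hash.h:
 *
 *      struct my_obj {
 *              __u64                   mo_key;
 *              atomic_t                mo_ref;
 *              struct hlist_node       mo_hnode;
 *      };
 *
 *      static void *my_key(struct hlist_node *hnode)
 *      {
 *              return &container_of(hnode, struct my_obj,
 *                                   mo_hnode)->mo_key;
 *      }
 *      (hs_hash, hs_keycmp, hs_object, hs_get and hs_put_locked are
 *       filled in likewise.)
 *
 *      hs = cfs_hash_create("my_hash",
 *                           5,   (cur_bits: 2^5 = 32 hash heads initially)
 *                           10,  (max_bits: may grow to 2^10 heads)
 *                           3,   (bkt_bits: 2^3 heads share one bucket lock)
 *                           0,   (extra_bytes)
 *                           CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *                           &my_ops, CFS_HASH_DEFAULT);
 *
 * Note cur_bits >= bkt_bits and max_bits >= cur_bits, matching the
 * LASSERT()s in cfs_hash_create() above.
 */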

/**
 * Cleanup libcfs hash @hs.
 */
static void
cfs_hash_destroy(cfs_hash_t *hs)
{
        struct hlist_node     *hnode;
        struct hlist_node     *pos;
        cfs_hash_bd_t    bd;
        int                i;

        LASSERT(hs != NULL);
        LASSERT(!cfs_hash_is_exiting(hs) &&
                !cfs_hash_is_iterating(hs));

        /**
         * prohibit further rehashes; no lock is needed because
         * I'm the only (last) one who can change it.
         */
        hs->hs_exiting = 1;
        if (cfs_hash_with_rehash(hs))
                cfs_hash_rehash_cancel(hs);

        cfs_hash_depth_wi_cancel(hs);
        /* rehash should be done/canceled */
        LASSERT(hs->hs_buckets != NULL &&
                hs->hs_rehash_buckets == NULL);

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                LASSERT(bd.bd_bucket != NULL);
                /* no need to take this lock, just for consistent code */
                cfs_hash_bd_lock(hs, &bd, 1);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                LASSERTF(!cfs_hash_with_assert_empty(hs),
                                         "hash %s bucket %u(%u) is not empty: %u items left\n",
                                         hs->hs_name, bd.bd_bucket->hsb_index,
                                         bd.bd_offset, bd.bd_bucket->hsb_count);
                                /* can't validate the key here, because
                                 * we may have interrupted a rehash */
                                cfs_hash_bd_del_locked(hs, &bd, hnode);
                                cfs_hash_exit(hs, hnode);
                        }
                }
                LASSERT(bd.bd_bucket->hsb_count == 0);
                cfs_hash_bd_unlock(hs, &bd, 1);
                cond_resched();
        }

        LASSERT(atomic_read(&hs->hs_count) == 0);

        cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
                              0, CFS_HASH_NBKT(hs));
        i = cfs_hash_with_bigname(hs) ?
            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
        LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));
}

cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
{
        if (atomic_inc_not_zero(&hs->hs_refcount))
                return hs;
        return NULL;
}
EXPORT_SYMBOL(cfs_hash_getref);

void cfs_hash_putref(cfs_hash_t *hs)
{
        if (atomic_dec_and_test(&hs->hs_refcount))
                cfs_hash_destroy(hs);
}
EXPORT_SYMBOL(cfs_hash_putref);

static inline int
cfs_hash_rehash_bits(cfs_hash_t *hs)
{
        if (cfs_hash_with_no_lock(hs) ||
            !cfs_hash_with_rehash(hs))
                return -EOPNOTSUPP;

        if (unlikely(cfs_hash_is_exiting(hs)))
                return -ESRCH;

        if (unlikely(cfs_hash_is_rehashing(hs)))
                return -EALREADY;

        if (unlikely(cfs_hash_is_iterating(hs)))
                return -EAGAIN;

        /* XXX: need to handle case with max_theta != 2.0
         *      and the case with min_theta != 0.5 */
        if ((hs->hs_cur_bits < hs->hs_max_bits) &&
            (__cfs_hash_theta(hs) > hs->hs_max_theta))
                return hs->hs_cur_bits + 1;

        if (!cfs_hash_with_shrink(hs))
                return 0;

        if ((hs->hs_cur_bits > hs->hs_min_bits) &&
            (__cfs_hash_theta(hs) < hs->hs_min_theta))
                return hs->hs_cur_bits - 1;

        return 0;
}
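
/*
 * Worked example (editorial note, not part of the original file): theta
 * is the load factor count/size in fixed point.  Assuming the
 * __cfs_hash_theta() helper in libcfs_hash.h computes
 * (hs_count << CFS_HASH_THETA_BITS) >> hs_cur_bits with
 * CFS_HASH_THETA_BITS == 10, then with hs_cur_bits = 10 (1024 hash
 * heads), hs_max_theta = 2.0 (2048 in fixed point) and 2600 items:
 *
 *      theta = (2600 << 10) >> 10 = 2600 > 2048
 *
 * so cfs_hash_rehash_bits() returns 11 and the table grows to 2048
 * heads, which brings theta back down to roughly 1.27.
 */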

/**
 * don't allow inline rehash if:
 * - user wants non-blocking change (add/del) on hash table
 * - too many elements
 */
static inline int
cfs_hash_rehash_inline(cfs_hash_t *hs)
{
        return !cfs_hash_with_nblk_change(hs) &&
               atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called when the item is added.
 */
void
cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
{
        cfs_hash_bd_t   bd;
        int          bits;

        LASSERT(hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_bd_get_and_lock(hs, key, &bd, 1);

        cfs_hash_key_validate(hs, key, hnode);
        cfs_hash_bd_add_locked(hs, &bd, hnode);

        cfs_hash_bd_unlock(hs, &bd, 1);

        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
}
EXPORT_SYMBOL(cfs_hash_add);

static struct hlist_node *
cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
                     struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        cfs_hash_bd_t     bds[2];
        int            bits = 0;

        LASSERT(hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        cfs_hash_key_validate(hs, key, hnode);
        ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
                                                 hnode, noref);
        cfs_hash_dual_bd_unlock(hs, bds, 1);

        if (ehnode == hnode) /* new item added */
                bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return ehnode;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
cfs_hash_add_unique(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
{
        return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
               -EALREADY : 0;
}
EXPORT_SYMBOL(cfs_hash_add_unique);

/**
 * Add item @hnode to libcfs hash @hs using @key.  If this @key
 * already exists in the hash then ops->hs_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->hs_get is called on the item which was added.
 */
void *
cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
                        struct hlist_node *hnode)
{
        hnode = cfs_hash_find_or_add(hs, key, hnode, 0);

        return cfs_hash_object(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_findadd_unique);

/**
 * Delete item @hnode from the libcfs hash @hs using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
void *
cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
{
        void       *obj  = NULL;
        int          bits = 0;
        cfs_hash_bd_t   bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        /* NB: do nothing if @hnode is not in hash table */
        if (hnode == NULL || !hlist_unhashed(hnode)) {
                if (bds[1].bd_bucket == NULL && hnode != NULL) {
                        cfs_hash_bd_del_locked(hs, &bds[0], hnode);
                } else {
                        hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
                                                                key, hnode);
                }
        }

        if (hnode != NULL) {
                obj  = cfs_hash_object(hs, hnode);
                bits = cfs_hash_rehash_bits(hs);
        }

        cfs_hash_dual_bd_unlock(hs, bds, 1);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return obj;
}
EXPORT_SYMBOL(cfs_hash_del);

/**
 * Delete item given @key in libcfs hash @hs.  The first @key found in
 * the hash will be removed; if the key exists multiple times in the hash
 * @hs, this function must be called once per key.  The removed object
 * will be returned and ops->hs_put is called on the removed object.
 */
void *
cfs_hash_del_key(cfs_hash_t *hs, const void *key)
{
        return cfs_hash_del(hs, key, NULL);
}
EXPORT_SYMBOL(cfs_hash_del_key);

/**
 * Lookup an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash, ops->hs_get is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object.  If the @key was not found
 * in the hash @hs, NULL is returned.
 */
void *
cfs_hash_lookup(cfs_hash_t *hs, const void *key)
{
        void             *obj = NULL;
        struct hlist_node     *hnode;
        cfs_hash_bd_t    bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

        hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
        if (hnode != NULL)
                obj = cfs_hash_object(hs, hnode);

        cfs_hash_dual_bd_unlock(hs, bds, 0);
        cfs_hash_unlock(hs, 0);

        return obj;
}
EXPORT_SYMBOL(cfs_hash_lookup);

static void
cfs_hash_for_each_enter(cfs_hash_t *hs)
{
        LASSERT(!cfs_hash_is_exiting(hs));

        if (!cfs_hash_with_rehash(hs))
                return;
        /*
         * NB: there is a race on cfs_hash_t::hs_iterating, but it doesn't
         * matter because it's just an unreliable signal to the rehash
         * thread, which will try to finish the rehash ASAP when seeing it.
         */
        hs->hs_iterating = 1;

        cfs_hash_lock(hs, 1);
        hs->hs_iterators++;

        /* NB: iteration is mostly called by service threads; we tend to
         * cancel a pending rehash request instead of blocking the service
         * thread, and relaunch the rehash request after iteration */
        if (cfs_hash_is_rehashing(hs))
                cfs_hash_rehash_cancel_locked(hs);
        cfs_hash_unlock(hs, 1);
}

static void
cfs_hash_for_each_exit(cfs_hash_t *hs)
{
        int remained;
        int bits;

        if (!cfs_hash_with_rehash(hs))
                return;
        cfs_hash_lock(hs, 1);
        remained = --hs->hs_iterators;
        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 1);
        /* NB: it's a race on cfs_hash_t::hs_iterating, see above */
        if (remained == 0)
                hs->hs_iterating = 0;
        if (bits > 0) {
                cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
                                    CFS_HASH_LOOP_HOG);
        }
}

/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 *
 * a) the iterator itself may sleep (it drops the lock and reschedules
 *    between buckets)!
 * b) during the callback:
 *    . the bucket lock is held, so the callback must never sleep.
 *    . if @remove_safe is true, the user can remove the current item
 *      with cfs_hash_bd_del_locked
 */
static __u64
cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
                        void *data, int remove_safe)
{
        struct hlist_node     *hnode;
        struct hlist_node     *pos;
        cfs_hash_bd_t    bd;
        __u64            count = 0;
        int                excl  = !!remove_safe;
        int                loop  = 0;
        int                i;

        cfs_hash_for_each_enter(hs);

        cfs_hash_lock(hs, 0);
        LASSERT(!cfs_hash_is_rehashing(hs));

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                cfs_hash_bd_lock(hs, &bd, excl);
                if (func == NULL) { /* only glimpse size */
                        count += bd.bd_bucket->hsb_count;
                        cfs_hash_bd_unlock(hs, &bd, excl);
                        continue;
                }

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                cfs_hash_bucket_validate(hs, &bd, hnode);
                                count++;
                                loop++;
                                if (func(hs, &bd, hnode, data)) {
                                        cfs_hash_bd_unlock(hs, &bd, excl);
                                        goto out;
                                }
                        }
                }
                cfs_hash_bd_unlock(hs, &bd, excl);
                if (loop < CFS_HASH_LOOP_HOG)
                        continue;
                loop = 0;
                cfs_hash_unlock(hs, 0);
                cond_resched();
                cfs_hash_lock(hs, 0);
        }
 out:
        cfs_hash_unlock(hs, 0);

        cfs_hash_for_each_exit(hs);
        return count;
}

typedef struct {
        cfs_hash_cond_opt_cb_t  func;
        void               *arg;
} cfs_hash_cond_arg_t;

static int
cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                         struct hlist_node *hnode, void *data)
{
        cfs_hash_cond_arg_t *cond = data;

        if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
                cfs_hash_bd_del_locked(hs, bd, hnode);
        return 0;
}

/**
 * Delete items from the libcfs hash @hs for which @func returns true.
 * The write lock is held during the loop over each bucket, to prevent
 * any of the deleted objects from being referenced.
 */
void
cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
{
        cfs_hash_cond_arg_t arg = {
                .func   = func,
                .arg    = data,
        };

        cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
}
EXPORT_SYMBOL(cfs_hash_cond_del);

void
cfs_hash_for_each(cfs_hash_t *hs,
                  cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each);

void
cfs_hash_for_each_safe(cfs_hash_t *hs,
                       cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 1);
}
EXPORT_SYMBOL(cfs_hash_for_each_safe);
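
/*
 * Usage sketch (editorial note, not part of the original file): a
 * cfs_hash_for_each_cb_t callback, with the signature implied by the
 * call sites above (cfs_hash_peek, cfs_hash_cond_del_locked).  "my_obj"
 * and "my_print" are invented for illustration; returning 1 stops the
 * iteration, as described in the file header:
 *
 *      static int my_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *                          struct hlist_node *hnode, void *data)
 *      {
 *              struct my_obj *obj = cfs_hash_object(hs, hnode);
 *              int *remaining = data;
 *
 *              pr_info("key %llu\n", obj->mo_key);
 *              return --(*remaining) <= 0;   (break after N items)
 *      }
 *
 *      int n = 10;
 *      cfs_hash_for_each(hs, my_print, &n);
 */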
1537
1538 static int
1539 cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
1540               struct hlist_node *hnode, void *data)
1541 {
1542         *(int *)data = 0;
1543         return 1; /* return 1 to break the loop */
1544 }
1545
1546 int
1547 cfs_hash_is_empty(cfs_hash_t *hs)
1548 {
1549         int empty = 1;
1550
1551         cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1552         return empty;
1553 }
1554 EXPORT_SYMBOL(cfs_hash_is_empty);
1555
1556 __u64
1557 cfs_hash_size_get(cfs_hash_t *hs)
1558 {
1559         return cfs_hash_with_counter(hs) ?
1560                atomic_read(&hs->hs_count) :
1561                cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1562 }
1563 EXPORT_SYMBOL(cfs_hash_size_get);
1564
1565 /*
1566  * cfs_hash_for_each_relax:
1567  * Iterate over the hash table and call @func on each item without
1568  * holding any lock. This function cannot guarantee to finish the
1569  * iteration if either of these features is enabled:
1570  *
1571  *  a. if rehash_key is enabled, an item can be moved from
1572  *     one bucket to another bucket
1573  *  b. the user can remove an item whose refcount is not zero from
1574  *     the hash table; worse, the user may change its key and insert
1575  *     it into another hash bucket.
1576  * There is no way to finish the iteration correctly in either of
1577  * these cases, so the walk of a bucket is abandoned as soon as its
1578  * version changes.
1579  */
1580 static int
1581 cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
1582 {
1583         struct hlist_node *hnode;
1584         struct hlist_node *tmp;
1585         cfs_hash_bd_t     bd;
1586         __u32        version;
1587         int            count = 0;
1588         int            stop_on_change;
1589         int            rc;
1590         int            i;
1591
1592         stop_on_change = cfs_hash_with_rehash_key(hs) ||
1593                          !cfs_hash_with_no_itemref(hs) ||
1594                          CFS_HOP(hs, put_locked) == NULL;
1595         cfs_hash_lock(hs, 0);
1596         LASSERT(!cfs_hash_is_rehashing(hs));
1597
1598         cfs_hash_for_each_bucket(hs, &bd, i) {
1599                 struct hlist_head *hhead;
1600
1601                 cfs_hash_bd_lock(hs, &bd, 0);
1602                 version = cfs_hash_bd_version_get(&bd);
1603
1604                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1605                         for (hnode = hhead->first; hnode != NULL;) {
1606                                 cfs_hash_bucket_validate(hs, &bd, hnode);
1607                                 cfs_hash_get(hs, hnode);
1608                                 cfs_hash_bd_unlock(hs, &bd, 0);
1609                                 cfs_hash_unlock(hs, 0);
1610
1611                                 rc = func(hs, &bd, hnode, data);
1612                                 if (stop_on_change)
1613                                         cfs_hash_put(hs, hnode);
1614                                 cond_resched();
1615                                 count++;
1616
1617                                 cfs_hash_lock(hs, 0);
1618                                 cfs_hash_bd_lock(hs, &bd, 0);
1619                                 if (!stop_on_change) {
1620                                         tmp = hnode->next;
1621                                         cfs_hash_put_locked(hs, hnode);
1622                                         hnode = tmp;
1623                                 } else { /* bucket changed? */
1624                                         if (version !=
1625                                             cfs_hash_bd_version_get(&bd))
1626                                                 break;
1627                                         /* safe to continue because no change */
1628                                         hnode = hnode->next;
1629                                 }
1630                                 if (rc) /* callback wants to break iteration */
1631                                         break;
1632                         }
1633                 }
1634                 cfs_hash_bd_unlock(hs, &bd, 0);
1635         }
1636         cfs_hash_unlock(hs, 0);
1637
1638         return count;
1639 }
1640
1641 int
1642 cfs_hash_for_each_nolock(cfs_hash_t *hs,
1643                          cfs_hash_for_each_cb_t func, void *data)
1644 {
1645         if (cfs_hash_with_no_lock(hs) ||
1646             cfs_hash_with_rehash_key(hs) ||
1647             !cfs_hash_with_no_itemref(hs))
1648                 return -EOPNOTSUPP;
1649
1650         if (CFS_HOP(hs, get) == NULL ||
1651             (CFS_HOP(hs, put) == NULL &&
1652              CFS_HOP(hs, put_locked) == NULL))
1653                 return -EOPNOTSUPP;
1654
1655         cfs_hash_for_each_enter(hs);
1656         cfs_hash_for_each_relax(hs, func, data);
1657         cfs_hash_for_each_exit(hs);
1658
1659         return 0;
1660 }
1661 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
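
/*
 * Example (editor's sketch): unlike cfs_hash_for_each(), the callback
 * here is invoked with no hash locks held (the item is pinned through
 * the registered get/put ops instead), so it may sleep.  "struct
 * my_obj", mo_mutex and my_obj_flush_one() are hypothetical.
 *
 *      static int my_flush_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *                             struct hlist_node *hnode, void *data)
 *      {
 *              struct my_obj *mo = cfs_hash_object(hs, hnode);
 *
 *              mutex_lock(&mo->mo_mutex);
 *              my_obj_flush_one(mo);
 *              mutex_unlock(&mo->mo_mutex);
 *              return 0;
 *      }
 *
 *      rc = cfs_hash_for_each_nolock(hs, my_flush_cb, NULL);
 */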
1662
1663 /**
1664  * For each hash bucket in the libcfs hash @hs call the passed callback
1665  * @func until all the hash buckets are empty.  The passed callback @func
1666  * or the previously registered callback hs->hs_put must remove the item
1667  * from the hash.  You may either use the cfs_hash_del() or hlist_del()
1668  * functions.  No rwlocks will be held during the callback @func, so it
1669  * is safe to sleep if needed.  This function will not terminate until
1670  * the hash is empty.  Note it is still possible to concurrently add new
1671  * items into the hash.  It is the caller's responsibility to ensure
1672  * the required locking is in place to prevent concurrent insertions.
1673  */
1674 int
1675 cfs_hash_for_each_empty(cfs_hash_t *hs,
1676                         cfs_hash_for_each_cb_t func, void *data)
1677 {
1678         unsigned  i = 0;
1679
1680         if (cfs_hash_with_no_lock(hs))
1681                 return -EOPNOTSUPP;
1682
1683         if (CFS_HOP(hs, get) == NULL ||
1684             (CFS_HOP(hs, put) == NULL &&
1685              CFS_HOP(hs, put_locked) == NULL))
1686                 return -EOPNOTSUPP;
1687
1688         cfs_hash_for_each_enter(hs);
1689         while (cfs_hash_for_each_relax(hs, func, data)) {
1690                 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1691                        hs->hs_name, i++);
1692         }
1693         cfs_hash_for_each_exit(hs);
1694         return 0;
1695 }
1696 EXPORT_SYMBOL(cfs_hash_for_each_empty);
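
/*
 * Example (editor's sketch): draining a hash at shutdown.  Per the
 * comment above, the callback must remove each item; here it does so
 * with cfs_hash_del().  The reference taken by the iterator is dropped
 * afterwards through the registered put op, which typically frees the
 * object once the last reference is gone.  "struct my_obj", mo_key and
 * mo_hnode are hypothetical.
 *
 *      static int my_drop_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *                            struct hlist_node *hnode, void *data)
 *      {
 *              struct my_obj *mo = cfs_hash_object(hs, hnode);
 *
 *              cfs_hash_del(hs, &mo->mo_key, &mo->mo_hnode);
 *              return 0;
 *      }
 *
 *      cfs_hash_for_each_empty(hs, my_drop_cb, NULL);
 */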
1697
1698 void
1699 cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
1700                         cfs_hash_for_each_cb_t func, void *data)
1701 {
1702         struct hlist_head   *hhead;
1703         struct hlist_node   *hnode;
1704         cfs_hash_bd_t       bd;
1705
1706         cfs_hash_for_each_enter(hs);
1707         cfs_hash_lock(hs, 0);
1708         if (hindex >= CFS_HASH_NHLIST(hs))
1709                 goto out;
1710
1711         cfs_hash_bd_index_set(hs, hindex, &bd);
1712
1713         cfs_hash_bd_lock(hs, &bd, 0);
1714         hhead = cfs_hash_bd_hhead(hs, &bd);
1715         hlist_for_each(hnode, hhead) {
1716                 if (func(hs, &bd, hnode, data))
1717                         break;
1718         }
1719         cfs_hash_bd_unlock(hs, &bd, 0);
1720  out:
1721         cfs_hash_unlock(hs, 0);
1722         cfs_hash_for_each_exit(hs);
1723 }
1725 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1726
1727 /**
1728  * For each item in the libcfs hash @hs which matches the @key, call
1729  * the passed callback @func, passing it each matching hash item and
1730  * the private @data as arguments.  During the callback the bucket
1731  * lock is held, so the callback must never sleep.
1732  */
1733 void
1734 cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
1735                       cfs_hash_for_each_cb_t func, void *data)
1736 {
1737         struct hlist_node   *hnode;
1738         cfs_hash_bd_t       bds[2];
1739         unsigned            i;
1740
1741         cfs_hash_lock(hs, 0);
1742
1743         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1744
1745         cfs_hash_for_each_bd(bds, 2, i) {
1746                 struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1747
1748                 hlist_for_each(hnode, hlist) {
1749                         cfs_hash_bucket_validate(hs, &bds[i], hnode);
1750
1751                         if (cfs_hash_keycmp(hs, key, hnode)) {
1752                                 if (func(hs, &bds[i], hnode, data))
1753                                         break;
1754                         }
1755                 }
1756         }
1757
1758         cfs_hash_dual_bd_unlock(hs, bds, 0);
1759         cfs_hash_unlock(hs, 0);
1760 }
1761 EXPORT_SYMBOL(cfs_hash_for_each_key);
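
/*
 * Example (editor's sketch): visiting every item that shares @key in a
 * hash that allows duplicate keys.  The bucket lock is held, so the
 * callback only records matches and must not sleep; "struct my_obj"
 * and its mo_linkage list head are hypothetical.
 *
 *      static int my_collect_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *                               struct hlist_node *hnode, void *data)
 *      {
 *              struct list_head *list = data;
 *              struct my_obj *mo = cfs_hash_object(hs, hnode);
 *
 *              list_add(&mo->mo_linkage, list);
 *              return 0;
 *      }
 *
 *      LIST_HEAD(matches);
 *
 *      cfs_hash_for_each_key(hs, &key, my_collect_cb, &matches);
 */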
1762
1763 /**
1764  * Cancel a pending rehash of the libcfs hash @hs: deschedule the
1765  * rehash work item if it has not started yet, otherwise wait
1766  * (rescheduling as needed) until the running rehash completes.
1767  */
1774 void
1775 cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
1776 {
1777         int     i;
1778
1779         /* the caller must hold cfs_hash_lock(hs, 1) */
1780         LASSERT(cfs_hash_with_rehash(hs) &&
1781                 !cfs_hash_with_no_lock(hs));
1782
1783         if (!cfs_hash_is_rehashing(hs))
1784                 return;
1785
1786         if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1787                 hs->hs_rehash_bits = 0;
1788                 return;
1789         }
1790
1791         for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1792                 cfs_hash_unlock(hs, 1);
1793                 /* raise a console warning if we have waited too long */
1794                 CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
1795                        "hash %s is still rehashing, rescheduled %d\n",
1796                        hs->hs_name, i - 1);
1797                 cond_resched();
1798                 cfs_hash_lock(hs, 1);
1799         }
1800 }
1801 EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
1802
1803 void
1804 cfs_hash_rehash_cancel(cfs_hash_t *hs)
1805 {
1806         cfs_hash_lock(hs, 1);
1807         cfs_hash_rehash_cancel_locked(hs);
1808         cfs_hash_unlock(hs, 1);
1809 }
1810 EXPORT_SYMBOL(cfs_hash_rehash_cancel);
1811
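/**
 * Rehash the libcfs hash @hs to the size computed by
 * cfs_hash_rehash_bits().  This can be used to grow the hash when
 * excessive chaining is detected, or to shrink it when it is larger
 * than needed.  When the CFS_HASH_REHASH flag is set in @hs the libcfs
 * hash may be rehashed dynamically during addition or removal if the
 * hash's theta value falls outside the range bounded by
 * hs->hs_min_theta and hs->hs_max_theta.  By default these values are
 * tuned to keep the chained hash depth small, and this approach
 * assumes a reasonably uniform hashing function.  The theta thresholds
 * for @hs are tunable via cfs_hash_set_theta().
 */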
1812 int
1813 cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
1814 {
1815         int     rc;
1816
1817         LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1818
1819         cfs_hash_lock(hs, 1);
1820
1821         rc = cfs_hash_rehash_bits(hs);
1822         if (rc <= 0) {
1823                 cfs_hash_unlock(hs, 1);
1824                 return rc;
1825         }
1826
1827         hs->hs_rehash_bits = rc;
1828         if (!do_rehash) {
1829                 /* launch and return */
1830                 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1831                 cfs_hash_unlock(hs, 1);
1832                 return 0;
1833         }
1834
1835         /* rehash right now */
1836         cfs_hash_unlock(hs, 1);
1837
1838         return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1839 }
1840 EXPORT_SYMBOL(cfs_hash_rehash);
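
/*
 * Example (editor's sketch): forcing a rehash after a bulk load on a
 * hash created with CFS_HASH_REHASH.  With do_rehash != 0 the resize
 * runs synchronously in the caller; with do_rehash == 0 the work item
 * hs_rehash_wi is only queued on cfs_sched_rehash and the call returns
 * at once.
 *
 *      rc = cfs_hash_rehash(hs, 1);
 *      if (rc < 0)
 *              CERROR("rehash failed: %d\n", rc);
 */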
1841
1842 static int
1843 cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
1844 {
1845         cfs_hash_bd_t      new;
1846         struct hlist_head  *hhead;
1847         struct hlist_node  *hnode;
1848         struct hlist_node  *pos;
1849         void          *key;
1850         int             c = 0;
1851
1852         /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
1853         cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1854                 hlist_for_each_safe(hnode, pos, hhead) {
1855                         key = cfs_hash_key(hs, hnode);
1856                         LASSERT(key != NULL);
1857                         /* Validate hnode is in the correct bucket. */
1858                         cfs_hash_bucket_validate(hs, old, hnode);
1859                         /*
1860                          * Delete from old hash bucket; move to new bucket.
1861                          * ops->hs_key must be defined.
1862                          */
1863                         cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1864                                              hs->hs_rehash_bits, key, &new);
1865                         cfs_hash_bd_move_locked(hs, old, &new, hnode);
1866                         c++;
1867                 }
1868         }
1869
1870         return c;
1871 }
1872
1873 static int
1874 cfs_hash_rehash_worker(cfs_workitem_t *wi)
1875 {
1876         cfs_hash_t       *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
1877         cfs_hash_bucket_t **bkts;
1878         cfs_hash_bd_t       bd;
1879         unsigned int    old_size;
1880         unsigned int    new_size;
1881         int              bsize;
1882         int              count = 0;
1883         int              rc = 0;
1884         int              i;
1885
1886         LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1887
1888         cfs_hash_lock(hs, 0);
1889         LASSERT(cfs_hash_is_rehashing(hs));
1890
1891         old_size = CFS_HASH_NBKT(hs);
1892         new_size = CFS_HASH_RH_NBKT(hs);
1893
1894         cfs_hash_unlock(hs, 0);
1895
1896         /*
1897          * hs::hs_rwlock is not needed for hs::hs_buckets here,
1898          * because nobody else can change the bkt-table.
1899          */
1900         bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1901                                         old_size, new_size);
1902         cfs_hash_lock(hs, 1);
1903         if (bkts == NULL) {
1904                 rc = -ENOMEM;
1905                 goto out;
1906         }
1907
1908         if (bkts == hs->hs_buckets) {
1909                 bkts = NULL; /* do nothing */
1910                 goto out;
1911         }
1912
1913         rc = __cfs_hash_theta(hs);
1914         if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1915                 /* free the new allocated bkt-table */
1916                 old_size = new_size;
1917                 new_size = CFS_HASH_NBKT(hs);
1918                 rc = -EALREADY;
1919                 goto out;
1920         }
1921
1922         LASSERT(hs->hs_rehash_buckets == NULL);
1923         hs->hs_rehash_buckets = bkts;
1924
1925         rc = 0;
1926         cfs_hash_for_each_bucket(hs, &bd, i) {
1927                 if (cfs_hash_is_exiting(hs)) {
1928                         rc = -ESRCH;
1929                         /* someone wants to destroy the hash, abort now */
1930                         if (old_size < new_size) /* OK to free old bkt-table */
1931                                 break;
1932                         /* it's shrinking, need to free the new bkt-table */
1933                         hs->hs_rehash_buckets = NULL;
1934                         old_size = new_size;
1935                         new_size = CFS_HASH_NBKT(hs);
1936                         goto out;
1937                 }
1938
1939                 count += cfs_hash_rehash_bd(hs, &bd);
1940                 if (count < CFS_HASH_LOOP_HOG ||
1941                     cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1942                         continue;
1943                 }
1944
1945                 count = 0;
1946                 cfs_hash_unlock(hs, 1);
1947                 cond_resched();
1948                 cfs_hash_lock(hs, 1);
1949         }
1950
1951         hs->hs_rehash_count++;
1952
1953         bkts = hs->hs_buckets;
1954         hs->hs_buckets = hs->hs_rehash_buckets;
1955         hs->hs_rehash_buckets = NULL;
1956
1957         hs->hs_cur_bits = hs->hs_rehash_bits;
1958  out:
1959         hs->hs_rehash_bits = 0;
1960         if (rc == -ESRCH) /* never be scheduled again */
1961                 cfs_wi_exit(cfs_sched_rehash, wi);
1962         bsize = cfs_hash_bkt_size(hs);
1963         cfs_hash_unlock(hs, 1);
1964         /* can't refer to @hs anymore because it could be destroyed */
1965         if (bkts != NULL)
1966                 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1967         if (rc != 0)
1968                 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1969         /* return 1 only if cfs_wi_exit is called */
1970         return rc == -ESRCH;
1971 }
1972
1973 /**
1974  * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
1975  * @old_key must be provided to locate the objects previous location
1976  * in the hash, and the @new_key will be used to reinsert the object.
1977  * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1978  * combo when it is critical that there is no window in time where the
1979  * object is missing from the hash.  When an object is being rehashed
1980  * the registered cfs_hash_get() and cfs_hash_put() functions will
1981  * not be called.
1982  */
1983 void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
1984                          void *new_key, struct hlist_node *hnode)
1985 {
1986         cfs_hash_bd_t   bds[3];
1987         cfs_hash_bd_t   old_bds[2];
1988         cfs_hash_bd_t   new_bd;
1989
1990         LASSERT(!hlist_unhashed(hnode));
1991
1992         cfs_hash_lock(hs, 0);
1993
1994         cfs_hash_dual_bd_get(hs, old_key, old_bds);
1995         cfs_hash_bd_get(hs, new_key, &new_bd);
1996
1997         bds[0] = old_bds[0];
1998         bds[1] = old_bds[1];
1999         bds[2] = new_bd;
2000
2001         /* NB: bds[0] and bds[1] are ordered already */
2002         cfs_hash_bd_order(&bds[1], &bds[2]);
2003         cfs_hash_bd_order(&bds[0], &bds[1]);
2004
2005         cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2006         if (likely(old_bds[1].bd_bucket == NULL)) {
2007                 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2008         } else {
2009                 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2010                 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2011         }
2012         /* overwrite the key inside the locks, otherwise this may
2013          * race with other operations, e.g. rehash */
2014         cfs_hash_keycpy(hs, new_key, hnode);
2015
2016         cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2017         cfs_hash_unlock(hs, 0);
2018 }
2019 EXPORT_SYMBOL(cfs_hash_rehash_key);
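
/*
 * Example (editor's sketch): atomically re-keying an object, e.g. when
 * a hypothetical "struct my_obj" is renamed from old_id to new_id.
 * Both buckets are locked together, so a concurrent lookup sees the
 * object under exactly one of the two keys at all times:
 *
 *      cfs_hash_rehash_key(hs, &old_id, &new_id, &mo->mo_hnode);
 *
 * Note the registered get/put ops are not called during the move, and
 * the object's key is updated via cfs_hash_keycpy() inside the locks.
 */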
2020
2021 int cfs_hash_debug_header(struct seq_file *m)
2022 {
2023         return seq_printf(m, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
2024                  CFS_HASH_BIGNAME_LEN,
2025                  "name", "cur", "min", "max", "theta", "t-min", "t-max",
2026                  "flags", "rehash", "count", "maxdep", "maxdepb",
2027                  " distribution");
2028 }
2029 EXPORT_SYMBOL(cfs_hash_debug_header);
2030
2031 static cfs_hash_bucket_t **
2032 cfs_hash_full_bkts(cfs_hash_t *hs)
2033 {
2034         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2035         if (hs->hs_rehash_buckets == NULL)
2036                 return hs->hs_buckets;
2037
2038         LASSERT(hs->hs_rehash_bits != 0);
2039         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2040                hs->hs_rehash_buckets : hs->hs_buckets;
2041 }
2042
2043 static unsigned int
2044 cfs_hash_full_nbkt(cfs_hash_t *hs)
2045 {
2046         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2047         if (hs->hs_rehash_buckets == NULL)
2048                 return CFS_HASH_NBKT(hs);
2049
2050         LASSERT(hs->hs_rehash_bits != 0);
2051         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2052                CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2053 }
2054
2055 int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m)
2056 {
2057         int                 dist[8] = { 0, };
2058         int                 maxdep  = -1;
2059         int                 maxdepb = -1;
2060         int                 total   = 0;
2061         int                 theta;
2062         int                 i;
2063
2064         cfs_hash_lock(hs, 0);
2065         theta = __cfs_hash_theta(hs);
2066
2067         seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d  0x%02x %6d ",
2068                       CFS_HASH_BIGNAME_LEN, hs->hs_name,
2069                       1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
2070                       1 << hs->hs_max_bits,
2071                       __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
2072                       __cfs_hash_theta_int(hs->hs_min_theta),
2073                       __cfs_hash_theta_frac(hs->hs_min_theta),
2074                       __cfs_hash_theta_int(hs->hs_max_theta),
2075                       __cfs_hash_theta_frac(hs->hs_max_theta),
2076                       hs->hs_flags, hs->hs_rehash_count);
2077
2078         /*
2079          * The distribution is a summary of the chained hash depth in
2080          * each of the libcfs hash buckets.  Each bucket's hsb_count is
2081          * divided by the hash theta value and used to generate a
2082          * histogram of the hash distribution.  A uniform hash will
2083          * result in all hash buckets being close to the average, thus
2084          * only the first few entries in the histogram will be non-zero.
2085          * If your hash function produces a non-uniform hash, this will
2086          * be observable as outlier buckets in the distribution histogram.
2087          *
2088          * Uniform hash distribution:      128/128/0/0/0/0/0/0
2089          * Non-uniform hash distribution:  128/125/0/0/0/0/2/1
2090          */
2091         for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2092                 cfs_hash_bd_t  bd;
2093
2094                 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2095                 cfs_hash_bd_lock(hs, &bd, 0);
2096                 if (maxdep < bd.bd_bucket->hsb_depmax) {
2097                         maxdep  = bd.bd_bucket->hsb_depmax;
2098                         maxdepb = ffz(~maxdep);
2099                 }
2100                 total += bd.bd_bucket->hsb_count;
2101                 dist[min(__cfs_fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2102                 cfs_hash_bd_unlock(hs, &bd, 0);
2103         }
2104
2105         seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
2106         for (i = 0; i < 8; i++)
2107                 seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
2108
2109         cfs_hash_unlock(hs, 0);
2110
2111         return 0;
2112 }
2113 EXPORT_SYMBOL(cfs_hash_debug_str);