Pileus Git - ~andy/linux/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 16 Mar 2011 15:58:43 +0000 (08:58 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 16 Mar 2011 15:58:43 +0000 (08:58 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw:
  GFS2: Don't use _raw version of RCU dereference
  GFS2: Adding missing unlock_page()
  GFS2: Update to AIL list locking
  GFS2: introduce AIL lock
  GFS2: fix block allocation check for fallocate
  GFS2: Optimize glock multiple-dequeue code
  GFS2: Remove potential race in flock code
  GFS2: Fix glock deallocation race
  GFS2: quota allows exceeding hard limit
  GFS2: deallocation performance patch
  GFS2: panics on quotacheck update
  GFS2: Improve cluster mmap scalability
  GFS2: Fix glock queue trace point
  GFS2: Post-VFS scale update for RCU path walk
  GFS2: Use RCU for glock hash table
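
The centrepiece of this series is the last patch listed, "GFS2: Use RCU for glock hash table": glock lookup moves from a rwlock-protected hlist to an hlist_bl table in which bit 0 of each bucket head doubles as the bucket lock, readers walk the chains under RCU, and a reference is taken only via atomic_inc_not_zero() so that entries already on their way to being freed are skipped. Below is a minimal kernel-C sketch of that pattern; the my_* identifiers are illustrative placeholders, not GFS2 code (the real versions appear in the glock.c diff that follows).

#include <linux/types.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <asm/atomic.h>

#define MY_HASH_SIZE 1024

struct my_obj {
        struct hlist_bl_node h_list;    /* chained off a bucket head */
        atomic_t refcount;              /* zero means the object is dying */
        u64 key;
        struct rcu_head rcu;            /* used by the teardown sketch later */
};

static struct hlist_bl_head my_hash_table[MY_HASH_SIZE];

/* Bit 0 of the bucket head pointer doubles as the bucket spinlock,
 * so no separate lock array is needed. */
static inline void my_lock_bucket(unsigned int hash)
{
        bit_spin_lock(0, (unsigned long *)&my_hash_table[hash]);
}

static inline void my_unlock_bucket(unsigned int hash)
{
        __bit_spin_unlock(0, (unsigned long *)&my_hash_table[hash]);
}

/* Lockless lookup: walk the chain under RCU and return an entry
 * only if its refcount could be raised from a non-zero value. */
static struct my_obj *my_obj_lookup(unsigned int hash, u64 key)
{
        struct my_obj *obj;
        struct hlist_bl_node *pos;

        rcu_read_lock();
        hlist_bl_for_each_entry_rcu(obj, pos, &my_hash_table[hash], h_list) {
                if (obj->key == key && atomic_inc_not_zero(&obj->refcount)) {
                        rcu_read_unlock();
                        return obj;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/* Writers still serialize, but only per bucket and only against
 * other writers. */
static void my_obj_insert(unsigned int hash, struct my_obj *obj)
{
        my_lock_bucket(hash);
        hlist_bl_add_head_rcu(&obj->h_list, &my_hash_table[hash]);
        my_unlock_bucket(hash);
}

Because the lookup is speculative, an allocator that races with another inserter must re-search under the bucket lock and discard its own allocation if the entry already exists, which is exactly what the second gfs2_glock_get() hunk below does.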

fs/gfs2/glock.c
fs/gfs2/main.c

diff --combined fs/gfs2/glock.c
index 7cd9a5a68d59d441f4dcf611d604a18a11e2b61f,85044b41824983dd58f445b6a263372a8f6bcc63..e2431313491f1e54e136be64dcc3b0c3ba1033bc
@@@ -26,6 -26,9 +26,9 @@@
  #include <linux/freezer.h>
  #include <linux/workqueue.h>
  #include <linux/jiffies.h>
+ #include <linux/rcupdate.h>
+ #include <linux/rculist_bl.h>
+ #include <linux/bit_spinlock.h>
  
  #include "gfs2.h"
  #include "incore.h"
  #define CREATE_TRACE_POINTS
  #include "trace_gfs2.h"
  
- struct gfs2_gl_hash_bucket {
-         struct hlist_head hb_list;
- };
  struct gfs2_glock_iter {
        int hash;                       /* hash bucket index         */
        struct gfs2_sbd *sdp;           /* incore superblock         */
@@@ -54,7 -53,6 +53,6 @@@
  
  typedef void (*glock_examiner) (struct gfs2_glock * gl);
  
- static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
  static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
  #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
  static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
@@@ -70,57 -68,9 +68,9 @@@ static DEFINE_SPINLOCK(lru_lock)
  #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
  #define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
  
- static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
+ static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
  static struct dentry *gfs2_root;
  
- /*
-  * Despite what you might think, the numbers below are not arbitrary :-)
-  * They are taken from the ipv4 routing hash code, which is well tested
-  * and thus should be nearly optimal. Later on we might tweek the numbers
-  * but for now this should be fine.
-  *
-  * The reason for putting the locks in a separate array from the list heads
-  * is that we can have fewer locks than list heads and save memory. We use
-  * the same hash function for both, but with a different hash mask.
-  */
- #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
-       defined(CONFIG_PROVE_LOCKING)
- #ifdef CONFIG_LOCKDEP
- # define GL_HASH_LOCK_SZ        256
- #else
- # if NR_CPUS >= 32
- #  define GL_HASH_LOCK_SZ       4096
- # elif NR_CPUS >= 16
- #  define GL_HASH_LOCK_SZ       2048
- # elif NR_CPUS >= 8
- #  define GL_HASH_LOCK_SZ       1024
- # elif NR_CPUS >= 4
- #  define GL_HASH_LOCK_SZ       512
- # else
- #  define GL_HASH_LOCK_SZ       256
- # endif
- #endif
- /* We never want more locks than chains */
- #if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
- # undef GL_HASH_LOCK_SZ
- # define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
- #endif
- static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
- static inline rwlock_t *gl_lock_addr(unsigned int x)
- {
-       return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
- }
- #else /* not SMP, so no spinlocks required */
- static inline rwlock_t *gl_lock_addr(unsigned int x)
- {
-       return NULL;
- }
- #endif
  /**
   * gl_hash() - Turn glock number into hash bucket number
   * @lock: The glock number
@@@ -141,25 -91,35 +91,35 @@@ static unsigned int gl_hash(const struc
        return h;
  }
  
- /**
-  * glock_free() - Perform a few checks and then release struct gfs2_glock
-  * @gl: The glock to release
-  *
-  * Also calls lock module to release its internal structure for this glock.
-  *
-  */
+ static inline void spin_lock_bucket(unsigned int hash)
+ {
+       struct hlist_bl_head *bl = &gl_hash_table[hash];
+       bit_spin_lock(0, (unsigned long *)bl);
+ }
  
- static void glock_free(struct gfs2_glock *gl)
+ static inline void spin_unlock_bucket(unsigned int hash)
+ {
+       struct hlist_bl_head *bl = &gl_hash_table[hash];
+       __bit_spin_unlock(0, (unsigned long *)bl);
+ }
+
+ static void gfs2_glock_dealloc(struct rcu_head *rcu)
+ {
+       struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
+       if (gl->gl_ops->go_flags & GLOF_ASPACE)
+               kmem_cache_free(gfs2_glock_aspace_cachep, gl);
+       else
+               kmem_cache_free(gfs2_glock_cachep, gl);
+ }
+
+ void gfs2_glock_free(struct gfs2_glock *gl)
  {
        struct gfs2_sbd *sdp = gl->gl_sbd;
-       struct address_space *mapping = gfs2_glock2aspace(gl);
-       struct kmem_cache *cachep = gfs2_glock_cachep;
  
-       GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
-       trace_gfs2_glock_put(gl);
-       if (mapping)
-               cachep = gfs2_glock_aspace_cachep;
-       sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
+       call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
+       if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+               wake_up(&sdp->sd_glock_wait);
  }
  
  /**
@@@ -185,34 -145,49 +145,49 @@@ static int demote_ok(const struct gfs2_
  {
        const struct gfs2_glock_operations *glops = gl->gl_ops;
  
+       /* assert_spin_locked(&gl->gl_spin); */
        if (gl->gl_state == LM_ST_UNLOCKED)
                return 0;
-       if (!list_empty(&gl->gl_holders))
+       if (test_bit(GLF_LFLUSH, &gl->gl_flags))
+               return 0;
+       if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
+           !list_empty(&gl->gl_holders))
                return 0;
        if (glops->go_demote_ok)
                return glops->go_demote_ok(gl);
        return 1;
  }
  
  /**
-  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
+  * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
   * @gl: the glock
   *
+  * If the glock is demotable, then we add it (or move it) to the end
+  * of the glock LRU list.
   */
  
- static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
+ static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
  {
-       int may_reclaim;
-       may_reclaim = (demote_ok(gl) &&
-                      (atomic_read(&gl->gl_ref) == 1 ||
-                       (gl->gl_name.ln_type == LM_TYPE_INODE &&
-                        atomic_read(&gl->gl_ref) <= 2)));
-       spin_lock(&lru_lock);
-       if (list_empty(&gl->gl_lru) && may_reclaim) {
+       if (demote_ok(gl)) {
+               spin_lock(&lru_lock);
+               if (!list_empty(&gl->gl_lru))
+                       list_del_init(&gl->gl_lru);
+               else
+                       atomic_inc(&lru_count);
                list_add_tail(&gl->gl_lru, &lru_list);
-               atomic_inc(&lru_count);
+               spin_unlock(&lru_lock);
        }
-       spin_unlock(&lru_lock);
+ }
+
+ void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
+ {
+       spin_lock(&gl->gl_spin);
+       __gfs2_glock_schedule_for_reclaim(gl);
+       spin_unlock(&gl->gl_spin);
  }
  
  /**
@@@ -227,7 -202,6 +202,6 @@@ void gfs2_glock_put_nolock(struct gfs2_
  {
        if (atomic_dec_and_test(&gl->gl_ref))
                GLOCK_BUG_ON(gl, 1);
-       gfs2_glock_schedule_for_reclaim(gl);
  }
  
  /**
   *
   */
  
- int gfs2_glock_put(struct gfs2_glock *gl)
+ void gfs2_glock_put(struct gfs2_glock *gl)
  {
-       int rv = 0;
+       struct gfs2_sbd *sdp = gl->gl_sbd;
+       struct address_space *mapping = gfs2_glock2aspace(gl);
  
-       write_lock(gl_lock_addr(gl->gl_hash));
-       if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
-               hlist_del(&gl->gl_list);
+       if (atomic_dec_and_test(&gl->gl_ref)) {
+               spin_lock_bucket(gl->gl_hash);
+               hlist_bl_del_rcu(&gl->gl_list);
+               spin_unlock_bucket(gl->gl_hash);
+               spin_lock(&lru_lock);
                if (!list_empty(&gl->gl_lru)) {
                        list_del_init(&gl->gl_lru);
                        atomic_dec(&lru_count);
                }
                spin_unlock(&lru_lock);
-               write_unlock(gl_lock_addr(gl->gl_hash));
                GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
-               glock_free(gl);
-               rv = 1;
-               goto out;
+               GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
+               trace_gfs2_glock_put(gl);
+               sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
        }
-       spin_lock(&gl->gl_spin);
-       gfs2_glock_schedule_for_reclaim(gl);
-       spin_unlock(&gl->gl_spin);
-       write_unlock(gl_lock_addr(gl->gl_hash));
- out:
-       return rv;
  }
  
  /**
@@@ -275,17 -245,15 +245,15 @@@ static struct gfs2_glock *search_bucket
                                        const struct lm_lockname *name)
  {
        struct gfs2_glock *gl;
-       struct hlist_node *h;
+       struct hlist_bl_node *h;
  
-       hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
+       hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;
-               atomic_inc(&gl->gl_ref);
-               return gl;
+               if (atomic_inc_not_zero(&gl->gl_ref))
+                       return gl;
        }
  
        return NULL;
@@@ -743,10 -711,11 +711,11 @@@ int gfs2_glock_get(struct gfs2_sbd *sdp
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        struct address_space *mapping;
+       struct kmem_cache *cachep;
  
-       read_lock(gl_lock_addr(hash));
+       rcu_read_lock();
        gl = search_bucket(hash, sdp, &name);
-       read_unlock(gl_lock_addr(hash));
+       rcu_read_unlock();
  
        *glp = gl;
        if (gl)
                return 0;
        if (!create)
                return -ENOENT;
  
        if (glops->go_flags & GLOF_ASPACE)
-               gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
+               cachep = gfs2_glock_aspace_cachep;
        else
-               gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
+               cachep = gfs2_glock_cachep;
+       gl = kmem_cache_alloc(cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;
  
                mapping->writeback_index = 0;
        }
  
-       write_lock(gl_lock_addr(hash));
+       spin_lock_bucket(hash);
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
-               write_unlock(gl_lock_addr(hash));
-               glock_free(gl);
+               spin_unlock_bucket(hash);
+               kmem_cache_free(cachep, gl);
+               atomic_dec(&sdp->sd_glock_disposal);
                gl = tmp;
        } else {
-               hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
-               write_unlock(gl_lock_addr(hash));
+               hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
+               spin_unlock_bucket(hash);
        }
  
        *glp = gl;
@@@ -1007,13 -978,13 +978,13 @@@ fail
                        insert_pt = &gh2->gh_list;
        }
        set_bit(GLF_QUEUED, &gl->gl_flags);
+       trace_gfs2_glock_queue(gh, 1);
        if (likely(insert_pt == NULL)) {
                list_add_tail(&gh->gh_list, &gl->gl_holders);
                if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
                        goto do_cancel;
                return;
        }
-       trace_gfs2_glock_queue(gh, 1);
        list_add_tail(&gh->gh_list, insert_pt);
  do_cancel:
        gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
@@@ -1113,6 -1084,7 +1084,7 @@@ void gfs2_glock_dq(struct gfs2_holder *
                    !test_bit(GLF_DEMOTE, &gl->gl_flags))
                        fast_path = 1;
        }
+       __gfs2_glock_schedule_for_reclaim(gl);
        trace_gfs2_glock_queue(gh, 0);
        spin_unlock(&gl->gl_spin);
        if (likely(fast_path))
@@@ -1276,10 -1248,8 +1248,8 @@@ int gfs2_glock_nq_m(unsigned int num_gh
  
  void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
  {
-       unsigned int x;
-       for (x = 0; x < num_gh; x++)
-               gfs2_glock_dq(&ghs[x]);
+       while (num_gh--)
+               gfs2_glock_dq(&ghs[num_gh]);
  }
  
  /**
  
  void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
  {
-       unsigned int x;
-       for (x = 0; x < num_gh; x++)
-               gfs2_glock_dq_uninit(&ghs[x]);
+       while (num_gh--)
+               gfs2_glock_dq_uninit(&ghs[num_gh]);
  }
  
  void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
@@@ -1440,42 -1408,30 +1408,30 @@@ static struct shrinker glock_shrinker 
   * @sdp: the filesystem
   * @bucket: the bucket
   *
-  * Returns: 1 if the bucket has entries
   */
  
- static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
+ static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
                          unsigned int hash)
  {
-       struct gfs2_glock *gl, *prev = NULL;
-       int has_entries = 0;
-       struct hlist_head *head = &gl_hash_table[hash].hb_list;
+       struct gfs2_glock *gl;
+       struct hlist_bl_head *head = &gl_hash_table[hash];
+       struct hlist_bl_node *pos;
  
-       read_lock(gl_lock_addr(hash));
-       /* Can't use hlist_for_each_entry - don't want prefetch here */
-       if (hlist_empty(head))
-               goto out;
-       gl = list_entry(head->first, struct gfs2_glock, gl_list);
-       while(1) {
-               if (!sdp || gl->gl_sbd == sdp) {
-                       gfs2_glock_hold(gl);
-                       read_unlock(gl_lock_addr(hash));
-                       if (prev)
-                               gfs2_glock_put(prev);
-                       prev = gl;
+       rcu_read_lock();
+       hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
+               if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
                        examiner(gl);
-                       has_entries = 1;
-                       read_lock(gl_lock_addr(hash));
-               }
-               if (gl->gl_list.next == NULL)
-                       break;
-               gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
        }
- out:
-       read_unlock(gl_lock_addr(hash));
-       if (prev)
-               gfs2_glock_put(prev);
+       rcu_read_unlock();
        cond_resched();
-       return has_entries;
+ }
+
+ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
+ {
+       unsigned x;
+       for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+               examine_bucket(examiner, sdp, x);
  }
  
  
@@@ -1529,10 -1485,21 +1485,21 @@@ static void clear_glock(struct gfs2_glo
  
  void gfs2_glock_thaw(struct gfs2_sbd *sdp)
  {
-       unsigned x;
+       glock_hash_walk(thaw_glock, sdp);
+ }
  
-       for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
-               examine_bucket(thaw_glock, sdp, x);
+ static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
+ {
+       int ret;
+       spin_lock(&gl->gl_spin);
+       ret = __dump_glock(seq, gl);
+       spin_unlock(&gl->gl_spin);
+       return ret;
+ }
+
+ static void dump_glock_func(struct gfs2_glock *gl)
+ {
+       dump_glock(NULL, gl);
  }
  
  /**
  
  void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
  {
-       unsigned int x;
-       for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
-               examine_bucket(clear_glock, sdp, x);
+       glock_hash_walk(clear_glock, sdp);
        flush_workqueue(glock_workqueue);
        wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
-       gfs2_dump_lockstate(sdp);
+       glock_hash_walk(dump_glock_func, sdp);
  }
  
  void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
        return error;
  }
  
- static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
- {
-       int ret;
-       spin_lock(&gl->gl_spin);
-       ret = __dump_glock(seq, gl);
-       spin_unlock(&gl->gl_spin);
-       return ret;
- }
  
- /**
-  * gfs2_dump_lockstate - print out the current lockstate
-  * @sdp: the filesystem
-  * @ub: the buffer to copy the information into
-  *
-  * If @ub is NULL, dump the lockstate to the console.
-  *
-  */
- static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
- {
-       struct gfs2_glock *gl;
-       struct hlist_node *h;
-       unsigned int x;
-       int error = 0;
-       for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-               read_lock(gl_lock_addr(x));
-               hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
-                       if (gl->gl_sbd != sdp)
-                               continue;
-                       error = dump_glock(NULL, gl);
-                       if (error)
-                               break;
-               }
-               read_unlock(gl_lock_addr(x));
-               if (error)
-                       break;
-       }
-       return error;
- }
  
  
  int __init gfs2_glock_init(void)
  {
        unsigned i;
        for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
-               INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
-       }
- #ifdef GL_HASH_LOCK_SZ
-       for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
-               rwlock_init(&gl_hash_locks[i]);
+               INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
        }
- #endif
  
        glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
 -                                        WQ_HIGHPRI | WQ_FREEZEABLE, 0);
 +                                        WQ_HIGHPRI | WQ_FREEZABLE, 0);
        if (IS_ERR(glock_workqueue))
                return PTR_ERR(glock_workqueue);
        gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
 -                                              WQ_MEM_RECLAIM | WQ_FREEZEABLE,
 +                                              WQ_MEM_RECLAIM | WQ_FREEZABLE,
                                                0);
        if (IS_ERR(gfs2_delete_workqueue)) {
                destroy_workqueue(glock_workqueue);
@@@ -1802,62 -1715,54 +1715,54 @@@ void gfs2_glock_exit(void
        destroy_workqueue(gfs2_delete_workqueue);
  }
  
+ static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
+ {
+       return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
+                             struct gfs2_glock, gl_list);
+ }
+
+ static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
+ {
+       return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
+                             struct gfs2_glock, gl_list);
+ }
  static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
  {
        struct gfs2_glock *gl;
  
- restart:
-       read_lock(gl_lock_addr(gi->hash));
-       gl = gi->gl;
-       if (gl) {
-               gi->gl = hlist_entry(gl->gl_list.next,
-                                    struct gfs2_glock, gl_list);
-       } else {
-               gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
-                                    struct gfs2_glock, gl_list);
-       }
-       if (gi->gl)
-               gfs2_glock_hold(gi->gl);
-       read_unlock(gl_lock_addr(gi->hash));
-       if (gl)
-               gfs2_glock_put(gl);
-       while (gi->gl == NULL) {
-               gi->hash++;
-               if (gi->hash >= GFS2_GL_HASH_SIZE)
-                       return 1;
-               read_lock(gl_lock_addr(gi->hash));
-               gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
-                                    struct gfs2_glock, gl_list);
-               if (gi->gl)
-                       gfs2_glock_hold(gi->gl);
-               read_unlock(gl_lock_addr(gi->hash));
-       }
-       if (gi->sdp != gi->gl->gl_sbd)
-               goto restart;
+       do {
+               gl = gi->gl;
+               if (gl) {
+                       gi->gl = glock_hash_next(gl);
+               } else {
+                       gi->gl = glock_hash_chain(gi->hash);
+               }
+               while (gi->gl == NULL) {
+                       gi->hash++;
+                       if (gi->hash >= GFS2_GL_HASH_SIZE) {
+                               rcu_read_unlock();
+                               return 1;
+                       }
+                       gi->gl = glock_hash_chain(gi->hash);
+               }
+       /* Skip entries for other sb and dead entries */
+       } while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
  
        return 0;
  }
  
- static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
- {
-       if (gi->gl)
-               gfs2_glock_put(gi->gl);
-       gi->gl = NULL;
- }
  static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
  {
        struct gfs2_glock_iter *gi = seq->private;
        loff_t n = *pos;
  
        gi->hash = 0;
+       rcu_read_lock();
  
        do {
-               if (gfs2_glock_iter_next(gi)) {
-                       gfs2_glock_iter_free(gi);
+               if (gfs2_glock_iter_next(gi))
                        return NULL;
-               }
        } while (n--);
  
        return gi->gl;
@@@ -1870,10 -1775,8 +1775,8 @@@ static void *gfs2_glock_seq_next(struc
  
        (*pos)++;
  
-       if (gfs2_glock_iter_next(gi)) {
-               gfs2_glock_iter_free(gi);
+       if (gfs2_glock_iter_next(gi))
                return NULL;
-       }
  
        return gi->gl;
  }
  static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
  {
        struct gfs2_glock_iter *gi = seq->private;
-       gfs2_glock_iter_free(gi);
+       if (gi->gl)
+               rcu_read_unlock();
+       gi->gl = NULL;
  }
  
  static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
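
The teardown half of the same pattern, continuing the sketch above (again with placeholder my_* names, not the GFS2 identifiers): the final reference holder unlinks the object under the bucket bit-lock with hlist_bl_del_rcu(), then hands the memory to call_rcu() so that no reader still inside rcu_read_lock() can be traversing it when it is freed. This mirrors gfs2_glock_put() and gfs2_glock_dealloc() in the diff above; a reader that encounters such a dying entry sees its refcount at zero and skips it, which is why both search_bucket() and the seq_file iterator test gl_ref.

#include <linux/kernel.h>
#include <linux/slab.h>

static void my_obj_free_rcu(struct rcu_head *rcu)
{
        struct my_obj *obj = container_of(rcu, struct my_obj, rcu);

        kfree(obj);     /* GFS2 frees into a kmem_cache here instead */
}

static void my_obj_put(unsigned int hash, struct my_obj *obj)
{
        if (!atomic_dec_and_test(&obj->refcount))
                return;

        /* Last reference: unlink under the bucket lock... */
        my_lock_bucket(hash);
        hlist_bl_del_rcu(&obj->h_list);
        my_unlock_bucket(hash);

        /* ...and defer the actual free until current RCU readers finish. */
        call_rcu(&obj->rcu, my_obj_free_rcu);
}
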
diff --combined fs/gfs2/main.c
index 72c31a315d9658e1a03ca830ebbf7a14d06d08f4,d850004f20808e3803eb8a9e4869f1c915586357..888a5f5a1a583104b3b093f3257c84f152b3ce0e
@@@ -14,6 -14,8 +14,8 @@@
  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/gfs2_ondisk.h>
+ #include <linux/rcupdate.h>
+ #include <linux/rculist_bl.h>
  #include <asm/atomic.h>
  
  #include "gfs2.h"
@@@ -45,7 -47,7 +47,7 @@@ static void gfs2_init_glock_once(void *
  {
        struct gfs2_glock *gl = foo;
  
-       INIT_HLIST_NODE(&gl->gl_list);
+       INIT_HLIST_BL_NODE(&gl->gl_list);
        spin_lock_init(&gl->gl_spin);
        INIT_LIST_HEAD(&gl->gl_holders);
        INIT_LIST_HEAD(&gl->gl_lru);
@@@ -59,7 -61,14 +61,7 @@@ static void gfs2_init_gl_aspace_once(vo
        struct address_space *mapping = (struct address_space *)(gl + 1);
  
        gfs2_init_glock_once(gl);
 -      memset(mapping, 0, sizeof(*mapping));
 -      INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
 -      spin_lock_init(&mapping->tree_lock);
 -      spin_lock_init(&mapping->i_mmap_lock);
 -      INIT_LIST_HEAD(&mapping->private_list);
 -      spin_lock_init(&mapping->private_lock);
 -      INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
 -      INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
 +      address_space_init_once(mapping);
  }
  
  /**
@@@ -137,7 -146,7 +139,7 @@@ static int __init init_gfs2_fs(void
  
        error = -ENOMEM;
        gfs_recovery_wq = alloc_workqueue("gfs_recovery",
 -                                        WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0);
 +                                        WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
        if (!gfs_recovery_wq)
                goto fail_wq;
  
@@@ -191,6 -200,8 +193,8 @@@ static void __exit exit_gfs2_fs(void
        unregister_filesystem(&gfs2meta_fs_type);
        destroy_workqueue(gfs_recovery_wq);
  
+       rcu_barrier();
        kmem_cache_destroy(gfs2_quotad_cachep);
        kmem_cache_destroy(gfs2_rgrpd_cachep);
        kmem_cache_destroy(gfs2_bufdata_cachep);
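
One consequence of freeing glocks through call_rcu() shows up in exit_gfs2_fs() above: the new rcu_barrier() waits for every RCU callback already queued on any CPU to run before the kmem caches are destroyed, since a still-pending gfs2_glock_dealloc() would otherwise free into a cache that no longer exists. A minimal sketch of the required module-unload ordering, with hypothetical my_* names:

static struct kmem_cache *my_obj_cachep;        /* placeholder cache */

static void __exit my_module_exit(void)
{
        /* 1. Stop everything that can queue new call_rcu() callbacks
         *    (unregister filesystems, destroy workqueues, ...). */

        /* 2. Wait for all already-queued RCU callbacks to run; each
         *    of them may still call kmem_cache_free(). */
        rcu_barrier();

        /* 3. Only now is it safe to destroy the cache they free into. */
        kmem_cache_destroy(my_obj_cachep);
}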