kernel/futex.c
futexes: Increase hash table size for better performance
1 /*
2  *  Fast Userspace Mutexes (which I call "Futexes!").
3  *  (C) Rusty Russell, IBM 2002
4  *
5  *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6  *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7  *
8  *  Removed page pinning, fix privately mapped COW pages and other cleanups
9  *  (C) Copyright 2003, 2004 Jamie Lokier
10  *
11  *  Robust futex support started by Ingo Molnar
12  *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13  *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14  *
15  *  PI-futex support started by Ingo Molnar and Thomas Gleixner
16  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17  *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18  *
19  *  PRIVATE futexes by Eric Dumazet
20  *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21  *
22  *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
23  *  Copyright (C) IBM Corporation, 2009
24  *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
25  *
26  *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
27  *  enough at me, Linus for the original (flawed) idea, Matthew
28  *  Kirkwood for proof-of-concept implementation.
29  *
30  *  "The futexes are also cursed."
31  *  "But they come in a choice of three flavours!"
32  *
33  *  This program is free software; you can redistribute it and/or modify
34  *  it under the terms of the GNU General Public License as published by
35  *  the Free Software Foundation; either version 2 of the License, or
36  *  (at your option) any later version.
37  *
38  *  This program is distributed in the hope that it will be useful,
39  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
40  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
41  *  GNU General Public License for more details.
42  *
43  *  You should have received a copy of the GNU General Public License
44  *  along with this program; if not, write to the Free Software
45  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
46  */
47 #include <linux/slab.h>
48 #include <linux/poll.h>
49 #include <linux/fs.h>
50 #include <linux/file.h>
51 #include <linux/jhash.h>
52 #include <linux/init.h>
53 #include <linux/futex.h>
54 #include <linux/mount.h>
55 #include <linux/pagemap.h>
56 #include <linux/syscalls.h>
57 #include <linux/signal.h>
58 #include <linux/export.h>
59 #include <linux/magic.h>
60 #include <linux/pid.h>
61 #include <linux/nsproxy.h>
62 #include <linux/ptrace.h>
63 #include <linux/sched/rt.h>
64 #include <linux/hugetlb.h>
65 #include <linux/freezer.h>
66 #include <linux/bootmem.h>
67
68 #include <asm/futex.h>
69
70 #include "locking/rtmutex_common.h"
71
72 int __read_mostly futex_cmpxchg_enabled;
73
74 /*
75  * Futex flags used to encode options to functions and preserve them across
76  * restarts.
77  */
78 #define FLAGS_SHARED            0x01
79 #define FLAGS_CLOCKRT           0x02
80 #define FLAGS_HAS_TIMEOUT       0x04
81
82 /*
83  * Priority Inheritance state:
84  */
85 struct futex_pi_state {
86         /*
87          * list of 'owned' pi_state instances - these have to be
88          * cleaned up in do_exit() if the task exits prematurely:
89          */
90         struct list_head list;
91
92         /*
93          * The PI object:
94          */
95         struct rt_mutex pi_mutex;
96
97         struct task_struct *owner;
98         atomic_t refcount;
99
100         union futex_key key;
101 };
102
103 /**
104  * struct futex_q - The hashed futex queue entry, one per waiting task
105  * @list:               priority-sorted list of tasks waiting on this futex
106  * @task:               the task waiting on the futex
107  * @lock_ptr:           the hash bucket lock
108  * @key:                the key the futex is hashed on
109  * @pi_state:           optional priority inheritance state
110  * @rt_waiter:          rt_waiter storage for use with requeue_pi
111  * @requeue_pi_key:     the requeue_pi target futex key
112  * @bitset:             bitset for the optional bitmasked wakeup
113  *
114  * We use this hashed waitqueue, instead of a normal wait_queue_t, so
115  * we can wake only the relevant ones (hashed queues may be shared).
116  *
117  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
118  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
119  * The order of wakeup is always to make the first condition true, then
120  * the second.
121  *
122  * PI futexes are typically woken before they are removed from the hash list via
123  * the rt_mutex code. See unqueue_me_pi().
124  */
125 struct futex_q {
126         struct plist_node list;
127
128         struct task_struct *task;
129         spinlock_t *lock_ptr;
130         union futex_key key;
131         struct futex_pi_state *pi_state;
132         struct rt_mutex_waiter *rt_waiter;
133         union futex_key *requeue_pi_key;
134         u32 bitset;
135 };
136
137 static const struct futex_q futex_q_init = {
138         /* list gets initialized in queue_me() */
139         .key = FUTEX_KEY_INIT,
140         .bitset = FUTEX_BITSET_MATCH_ANY
141 };
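/*
 * Editorial sketch, not part of the original file: the "woken" test
 * described in the struct comment above, written out as a hypothetical
 * helper (the kernel open-codes both halves). wake_futex() first empties
 * the plist node, then clears lock_ptr, so observing either condition
 * means the wakeup is at least in progress.
 */
static inline bool futex_q_is_woken(struct futex_q *q)
{
	return plist_node_empty(&q->list) || q->lock_ptr == NULL;
}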
142
143 /*
144  * Hash buckets are shared by all the futex_keys that hash to the same
145  * location.  Each key may have multiple futex_q structures, one for each task
146  * waiting on a futex.
147  */
148 struct futex_hash_bucket {
149         spinlock_t lock;
150         struct plist_head chain;
151 } ____cacheline_aligned_in_smp;
152
153 static unsigned long __read_mostly futex_hashsize;
154
155 static struct futex_hash_bucket *futex_queues;
156
157 /*
158  * We hash on the keys returned from get_futex_key (see below).
159  */
160 static struct futex_hash_bucket *hash_futex(union futex_key *key)
161 {
162         u32 hash = jhash2((u32*)&key->both.word,
163                           (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
164                           key->both.offset);
165         return &futex_queues[hash & (futex_hashsize - 1)];
166 }
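/*
 * Editorial note, not part of the original file: "hash & (futex_hashsize
 * - 1)" only yields a uniform bucket index if futex_hashsize is a power
 * of two, which the boot-time table allocation is expected to guarantee.
 * A hedged sanity check would be:
 */
static inline void futex_hashsize_check(void)
{
	/* A power of two has exactly one bit set. */
	WARN_ON_ONCE(futex_hashsize & (futex_hashsize - 1));
}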
167
168 /*
169  * Return 1 if two futex_keys are equal, 0 otherwise.
170  */
171 static inline int match_futex(union futex_key *key1, union futex_key *key2)
172 {
173         return (key1 && key2
174                 && key1->both.word == key2->both.word
175                 && key1->both.ptr == key2->both.ptr
176                 && key1->both.offset == key2->both.offset);
177 }
178
179 /*
180  * Take a reference to the resource addressed by a key.
181  * Can be called while holding spinlocks.
182  *
183  */
184 static void get_futex_key_refs(union futex_key *key)
185 {
186         if (!key->both.ptr)
187                 return;
188
189         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
190         case FUT_OFF_INODE:
191                 ihold(key->shared.inode);
192                 break;
193         case FUT_OFF_MMSHARED:
194                 atomic_inc(&key->private.mm->mm_count);
195                 break;
196         }
197 }
198
199 /*
200  * Drop a reference to the resource addressed by a key.
201  * The hash bucket spinlock must not be held.
202  */
203 static void drop_futex_key_refs(union futex_key *key)
204 {
205         if (!key->both.ptr) {
206                 /* If we're here then we tried to put a key we failed to get */
207                 WARN_ON_ONCE(1);
208                 return;
209         }
210
211         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
212         case FUT_OFF_INODE:
213                 iput(key->shared.inode);
214                 break;
215         case FUT_OFF_MMSHARED:
216                 mmdrop(key->private.mm);
217                 break;
218         }
219 }
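/*
 * Editorial sketch, not part of the original file: the key type that the
 * two switches above dispatch on lives in the low bits of
 * key->both.offset. This works because futexes are u32-aligned, so the
 * bottom two bits of the page offset are always free. A hypothetical
 * classifier mirroring those switches:
 */
enum futex_key_type { FUT_KEY_PRIVATE, FUT_KEY_INODE, FUT_KEY_MMSHARED };

static inline enum futex_key_type futex_key_type(union futex_key *key)
{
	switch (key->both.offset & (FUT_OFF_INODE | FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		return FUT_KEY_INODE;
	case FUT_OFF_MMSHARED:
		return FUT_KEY_MMSHARED;
	default:
		return FUT_KEY_PRIVATE;
	}
}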
220
221 /**
222  * get_futex_key() - Get parameters which are the keys for a futex
223  * @uaddr:      virtual address of the futex
224  * @fshared:    0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
225  * @key:        address where result is stored.
226  * @rw:         mapping needs to be read/write (values: VERIFY_READ,
227  *              VERIFY_WRITE)
228  *
229  * Return: a negative error code or 0
230  *
231  * The key words are stored in *key on success.
232  *
233  * For shared mappings, it's (page->index, file_inode(vma->vm_file),
234  * offset_within_page).  For private mappings, it's (uaddr, current->mm).
235  * We can usually work out the index without swapping in the page.
236  *
237  * lock_page() might sleep, the caller should not hold a spinlock.
238  */
239 static int
240 get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
241 {
242         unsigned long address = (unsigned long)uaddr;
243         struct mm_struct *mm = current->mm;
244         struct page *page, *page_head;
245         int err, ro = 0;
246
247         /*
248          * The futex address must be "naturally" aligned.
249          */
250         key->both.offset = address % PAGE_SIZE;
251         if (unlikely((address % sizeof(u32)) != 0))
252                 return -EINVAL;
253         address -= key->both.offset;
254
255         if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
256                 return -EFAULT;
257
258         /*
259          * PROCESS_PRIVATE futexes are fast.
260          * As the mm cannot disappear under us and the 'key' only needs
261          * the virtual address, we don't even have to find the underlying vma.
262          * Note: we do have to check that 'uaddr' is a valid user address,
263          * but access_ok() should be faster than find_vma().
264          */
265         if (!fshared) {
266                 key->private.mm = mm;
267                 key->private.address = address;
268                 get_futex_key_refs(key);
269                 return 0;
270         }
271
272 again:
273         err = get_user_pages_fast(address, 1, 1, &page);
274         /*
275          * If write access is not required (e.g. FUTEX_WAIT), try
276          * and get read-only access.
277          */
278         if (err == -EFAULT && rw == VERIFY_READ) {
279                 err = get_user_pages_fast(address, 1, 0, &page);
280                 ro = 1;
281         }
282         if (err < 0)
283                 return err;
284         else
285                 err = 0;
286
287 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
288         page_head = page;
289         if (unlikely(PageTail(page))) {
290                 put_page(page);
291                 /* serialize against __split_huge_page_splitting() */
292                 local_irq_disable();
293                 if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
294                         page_head = compound_head(page);
295                         /*
296                          * page_head is valid pointer but we must pin
297                          * it before taking the PG_lock and/or
298                          * PG_compound_lock. The moment we re-enable
299                          * irqs __split_huge_page_splitting() can
300                          * return and the head page can be freed from
301                          * under us. We can't take the PG_lock and/or
302                          * PG_compound_lock on a page that could be
303                          * freed from under us.
304                          */
305                         if (page != page_head) {
306                                 get_page(page_head);
307                                 put_page(page);
308                         }
309                         local_irq_enable();
310                 } else {
311                         local_irq_enable();
312                         goto again;
313                 }
314         }
315 #else
316         page_head = compound_head(page);
317         if (page != page_head) {
318                 get_page(page_head);
319                 put_page(page);
320         }
321 #endif
322
323         lock_page(page_head);
324
325         /*
326          * If page_head->mapping is NULL, then it cannot be a PageAnon
327          * page; but it might be the ZERO_PAGE or in the gate area or
328          * in a special mapping (all cases which we are happy to fail);
329          * or it may have been a good file page when get_user_pages_fast
330          * found it, but truncated or holepunched or subjected to
331          * invalidate_complete_page2 before we got the page lock (also
332          * cases which we are happy to fail).  And we hold a reference,
333          * so refcount care in invalidate_complete_page's remove_mapping
334          * prevents drop_caches from setting mapping to NULL beneath us.
335          *
336          * The case we do have to guard against is when memory pressure made
337          * shmem_writepage move it from filecache to swapcache beneath us:
338          * an unlikely race, but we do need to retry for page_head->mapping.
339          */
340         if (!page_head->mapping) {
341                 int shmem_swizzled = PageSwapCache(page_head);
342                 unlock_page(page_head);
343                 put_page(page_head);
344                 if (shmem_swizzled)
345                         goto again;
346                 return -EFAULT;
347         }
348
349         /*
350          * Private mappings are handled in a simple way.
351          *
352          * NOTE: When userspace waits on a MAP_SHARED mapping, even if
353          * it's a read-only handle, it's expected that futexes attach to
354          * the object not the particular process.
355          */
356         if (PageAnon(page_head)) {
357                 /*
358                  * A RO anonymous page will never change and thus doesn't make
359                  * sense for futex operations.
360                  */
361                 if (ro) {
362                         err = -EFAULT;
363                         goto out;
364                 }
365
366                 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
367                 key->private.mm = mm;
368                 key->private.address = address;
369         } else {
370                 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
371                 key->shared.inode = page_head->mapping->host;
372                 key->shared.pgoff = basepage_index(page);
373         }
374
375         get_futex_key_refs(key);
376
377 out:
378         unlock_page(page_head);
379         put_page(page_head);
380         return err;
381 }
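/*
 * Editorial example, not part of the original file: the two key flavours
 * as seen from userspace. A futex used with FUTEX_*_PRIVATE takes the
 * fast (mm, uaddr) path above and never walks page tables here; the same
 * word in a MAP_SHARED mapping, used without the private flag, is keyed
 * by (inode, page offset) so that unrelated processes agree on the key.
 * Minimal sketch, error handling elided:
 */
#if 0	/* userspace illustration only */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long futex_wait(unsigned int *uaddr, unsigned int val, int shared)
{
	int op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static long futex_wake(unsigned int *uaddr, int nr, int shared)
{
	int op = shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
	return syscall(SYS_futex, uaddr, op, nr, NULL, NULL, 0);
}
#endif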
382
383 static inline void put_futex_key(union futex_key *key)
384 {
385         drop_futex_key_refs(key);
386 }
387
388 /**
389  * fault_in_user_writeable() - Fault in user address and verify RW access
390  * @uaddr:      pointer to faulting user space address
391  *
392  * Slow path to fixup the fault we just took in the atomic write
393  * access to @uaddr.
394  *
395  * We have no generic implementation of a non-destructive write to the
396  * user address. We know that we faulted in the atomic pagefault
397  * disabled section so we might as well avoid the #PF overhead by
398  * calling fixup_user_fault() right away.
399  */
400 static int fault_in_user_writeable(u32 __user *uaddr)
401 {
402         struct mm_struct *mm = current->mm;
403         int ret;
404
405         down_read(&mm->mmap_sem);
406         ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
407                                FAULT_FLAG_WRITE);
408         up_read(&mm->mmap_sem);
409
410         return ret < 0 ? ret : 0;
411 }
412
413 /**
414  * futex_top_waiter() - Return the highest priority waiter on a futex
415  * @hb:         the hash bucket the futex_q's reside in
416  * @key:        the futex key (to distinguish it from other futex futex_q's)
417  *
418  * Must be called with the hb lock held.
419  */
420 static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
421                                         union futex_key *key)
422 {
423         struct futex_q *this;
424
425         plist_for_each_entry(this, &hb->chain, list) {
426                 if (match_futex(&this->key, key))
427                         return this;
428         }
429         return NULL;
430 }
431
432 static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
433                                       u32 uval, u32 newval)
434 {
435         int ret;
436
437         pagefault_disable();
438         ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
439         pagefault_enable();
440
441         return ret;
442 }
443
444 static int get_futex_value_locked(u32 *dest, u32 __user *from)
445 {
446         int ret;
447
448         pagefault_disable();
449         ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
450         pagefault_enable();
451
452         return ret ? -EFAULT : 0;
453 }
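/*
 * Editorial note, not part of the original file: both helpers above run
 * with pagefaults disabled, so under hb->lock they can only fail with
 * -EFAULT instead of sleeping. The calling convention used throughout
 * this file is therefore: drop the bucket lock, fault the page in with
 * get_user() or fault_in_user_writeable(), and retry, roughly:
 *
 *	retry:
 *		spin_lock(&hb->lock);
 *		ret = get_futex_value_locked(&uval, uaddr);
 *		if (ret) {
 *			spin_unlock(&hb->lock);
 *			if (get_user(uval, uaddr))
 *				return -EFAULT;
 *			goto retry;
 *		}
 */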
454
455
456 /*
457  * PI code:
458  */
459 static int refill_pi_state_cache(void)
460 {
461         struct futex_pi_state *pi_state;
462
463         if (likely(current->pi_state_cache))
464                 return 0;
465
466         pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
467
468         if (!pi_state)
469                 return -ENOMEM;
470
471         INIT_LIST_HEAD(&pi_state->list);
472         /* pi_mutex gets initialized later */
473         pi_state->owner = NULL;
474         atomic_set(&pi_state->refcount, 1);
475         pi_state->key = FUTEX_KEY_INIT;
476
477         current->pi_state_cache = pi_state;
478
479         return 0;
480 }
481
482 static struct futex_pi_state * alloc_pi_state(void)
483 {
484         struct futex_pi_state *pi_state = current->pi_state_cache;
485
486         WARN_ON(!pi_state);
487         current->pi_state_cache = NULL;
488
489         return pi_state;
490 }
491
492 static void free_pi_state(struct futex_pi_state *pi_state)
493 {
494         if (!atomic_dec_and_test(&pi_state->refcount))
495                 return;
496
497         /*
498          * If pi_state->owner is NULL, the owner is most probably dying
499          * and has cleaned up the pi_state already
500          */
501         if (pi_state->owner) {
502                 raw_spin_lock_irq(&pi_state->owner->pi_lock);
503                 list_del_init(&pi_state->list);
504                 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
505
506                 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
507         }
508
509         if (current->pi_state_cache)
510                 kfree(pi_state);
511         else {
512                 /*
513                  * pi_state->list is already empty.
514                  * clear pi_state->owner.
515                  * refcount is at 0 - put it back to 1.
516                  */
517                 pi_state->owner = NULL;
518                 atomic_set(&pi_state->refcount, 1);
519                 current->pi_state_cache = pi_state;
520         }
521 }
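/*
 * Editorial note, not part of the original file: the three functions
 * above form a one-slot, per-task cache. refill_pi_state_cache() does
 * the GFP_KERNEL allocation while sleeping is still legal, so
 * alloc_pi_state() can later be called with hb->lock held and never
 * fails; free_pi_state() parks the object back in the empty slot once
 * its refcount drops to zero. The expected calling pattern is:
 *
 *	if (refill_pi_state_cache())	(may sleep, run this first)
 *		return -ENOMEM;
 *	spin_lock(&hb->lock);
 *	pi_state = alloc_pi_state();	(atomic context, cannot fail)
 */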
522
523 /*
524  * Look up the task based on what TID userspace gave us.
525  * We don't trust it.
526  */
527 static struct task_struct * futex_find_get_task(pid_t pid)
528 {
529         struct task_struct *p;
530
531         rcu_read_lock();
532         p = find_task_by_vpid(pid);
533         if (p)
534                 get_task_struct(p);
535
536         rcu_read_unlock();
537
538         return p;
539 }
540
541 /*
542  * This task is holding PI mutexes at exit time => bad.
543  * Kernel cleans up PI-state, but userspace is likely hosed.
544  * (Robust-futex cleanup is separate and might save the day for userspace.)
545  */
546 void exit_pi_state_list(struct task_struct *curr)
547 {
548         struct list_head *next, *head = &curr->pi_state_list;
549         struct futex_pi_state *pi_state;
550         struct futex_hash_bucket *hb;
551         union futex_key key = FUTEX_KEY_INIT;
552
553         if (!futex_cmpxchg_enabled)
554                 return;
555         /*
556          * We are a ZOMBIE and nobody can enqueue itself on
557          * pi_state_list anymore, but we have to be careful
558          * versus waiters unqueueing themselves:
559          */
560         raw_spin_lock_irq(&curr->pi_lock);
561         while (!list_empty(head)) {
562
563                 next = head->next;
564                 pi_state = list_entry(next, struct futex_pi_state, list);
565                 key = pi_state->key;
566                 hb = hash_futex(&key);
567                 raw_spin_unlock_irq(&curr->pi_lock);
568
569                 spin_lock(&hb->lock);
570
571                 raw_spin_lock_irq(&curr->pi_lock);
572                 /*
573                  * We dropped the pi-lock, so re-check whether this
574                  * task still owns the PI-state:
575                  */
576                 if (head->next != next) {
577                         spin_unlock(&hb->lock);
578                         continue;
579                 }
580
581                 WARN_ON(pi_state->owner != curr);
582                 WARN_ON(list_empty(&pi_state->list));
583                 list_del_init(&pi_state->list);
584                 pi_state->owner = NULL;
585                 raw_spin_unlock_irq(&curr->pi_lock);
586
587                 rt_mutex_unlock(&pi_state->pi_mutex);
588
589                 spin_unlock(&hb->lock);
590
591                 raw_spin_lock_irq(&curr->pi_lock);
592         }
593         raw_spin_unlock_irq(&curr->pi_lock);
594 }
595
596 static int
597 lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
598                 union futex_key *key, struct futex_pi_state **ps)
599 {
600         struct futex_pi_state *pi_state = NULL;
601         struct futex_q *this, *next;
602         struct task_struct *p;
603         pid_t pid = uval & FUTEX_TID_MASK;
604
605         plist_for_each_entry_safe(this, next, &hb->chain, list) {
606                 if (match_futex(&this->key, key)) {
607                         /*
608                          * Another waiter already exists - bump up
609                          * the refcount and return its pi_state:
610                          */
611                         pi_state = this->pi_state;
612                         /*
613                          * Userspace might have messed up non-PI and PI futexes
614                          */
615                         if (unlikely(!pi_state))
616                                 return -EINVAL;
617
618                         WARN_ON(!atomic_read(&pi_state->refcount));
619
620                         /*
621                          * When pi_state->owner is NULL then the owner died
622                          * and another waiter is on the fly. pi_state->owner
623                          * is fixed up by the task which acquires
624                          * pi_state->rt_mutex.
625                          *
626                          * We do not check for pid == 0 which can happen when
627                          * the owner died and robust_list_exit() cleared the
628                          * TID.
629                          */
630                         if (pid && pi_state->owner) {
631                                 /*
632                                  * Bail out if user space manipulated the
633                                  * futex value.
634                                  */
635                                 if (pid != task_pid_vnr(pi_state->owner))
636                                         return -EINVAL;
637                         }
638
639                         atomic_inc(&pi_state->refcount);
640                         *ps = pi_state;
641
642                         return 0;
643                 }
644         }
645
646         /*
647          * We are the first waiter - try to look up the real owner and attach
648          * the new pi_state to it, but bail out when TID = 0
649          */
650         if (!pid)
651                 return -ESRCH;
652         p = futex_find_get_task(pid);
653         if (!p)
654                 return -ESRCH;
655
656         /*
657          * We need to look at the task state flags to figure out
658          * whether the task is exiting. To protect against the do_exit
659          * change of the task flags, we do this protected by
660          * p->pi_lock:
661          */
662         raw_spin_lock_irq(&p->pi_lock);
663         if (unlikely(p->flags & PF_EXITING)) {
664                 /*
665                  * The task is on the way out. When PF_EXITPIDONE is
666                  * set, we know that the task has finished the
667                  * cleanup:
668                  */
669                 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
670
671                 raw_spin_unlock_irq(&p->pi_lock);
672                 put_task_struct(p);
673                 return ret;
674         }
675
676         pi_state = alloc_pi_state();
677
678         /*
679          * Initialize the pi_mutex in locked state and make 'p'
680          * the owner of it:
681          */
682         rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
683
684         /* Store the key for possible exit cleanups: */
685         pi_state->key = *key;
686
687         WARN_ON(!list_empty(&pi_state->list));
688         list_add(&pi_state->list, &p->pi_state_list);
689         pi_state->owner = p;
690         raw_spin_unlock_irq(&p->pi_lock);
691
692         put_task_struct(p);
693
694         *ps = pi_state;
695
696         return 0;
697 }
698
699 /**
700  * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
701  * @uaddr:              the pi futex user address
702  * @hb:                 the pi futex hash bucket
703  * @key:                the futex key associated with uaddr and hb
704  * @ps:                 the pi_state pointer where we store the result of the
705  *                      lookup
706  * @task:               the task to perform the atomic lock work for.  This will
707  *                      be "current" except in the case of requeue pi.
708  * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
709  *
710  * Return:
711  *  0 - ready to wait;
712  *  1 - acquired the lock;
713  * <0 - error
714  *
715  * The hb->lock and futex_key refs shall be held by the caller.
716  */
717 static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
718                                 union futex_key *key,
719                                 struct futex_pi_state **ps,
720                                 struct task_struct *task, int set_waiters)
721 {
722         int lock_taken, ret, force_take = 0;
723         u32 uval, newval, curval, vpid = task_pid_vnr(task);
724
725 retry:
726         ret = lock_taken = 0;
727
728         /*
729          * To avoid races, we attempt to take the lock here again
730          * (by doing a 0 -> TID atomic cmpxchg), while holding all
731          * the locks. It will most likely not succeed.
732          */
733         newval = vpid;
734         if (set_waiters)
735                 newval |= FUTEX_WAITERS;
736
737         if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
738                 return -EFAULT;
739
740         /*
741          * Detect deadlocks.
742          */
743         if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
744                 return -EDEADLK;
745
746         /*
747          * Surprise - we got the lock. Just return to userspace:
748          */
749         if (unlikely(!curval))
750                 return 1;
751
752         uval = curval;
753
754         /*
755          * Set the FUTEX_WAITERS flag, so the owner will know it has someone
756          * to wake at the next unlock.
757          */
758         newval = curval | FUTEX_WAITERS;
759
760         /*
761          * Should we force take the futex? See below.
762          */
763         if (unlikely(force_take)) {
764                 /*
765                  * Keep the OWNER_DIED and the WAITERS bit and set the
766                  * new TID value.
767                  */
768                 newval = (curval & ~FUTEX_TID_MASK) | vpid;
769                 force_take = 0;
770                 lock_taken = 1;
771         }
772
773         if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
774                 return -EFAULT;
775         if (unlikely(curval != uval))
776                 goto retry;
777
778         /*
779          * We took the lock due to forced take over.
780          */
781         if (unlikely(lock_taken))
782                 return 1;
783
784         /*
785          * We dont have the lock. Look up the PI state (or create it if
786          * we are the first waiter):
787          */
788         ret = lookup_pi_state(uval, hb, key, ps);
789
790         if (unlikely(ret)) {
791                 switch (ret) {
792                 case -ESRCH:
793                         /*
794                          * We failed to find an owner for this
795                          * futex. So we have no pi_state to block
796                          * on. This can happen in two cases:
797                          *
798                          * 1) The owner died
799                          * 2) A stale FUTEX_WAITERS bit
800                          *
801                          * Re-read the futex value.
802                          */
803                         if (get_futex_value_locked(&curval, uaddr))
804                                 return -EFAULT;
805
806                         /*
807                          * If the owner died or we have a stale
808                          * WAITERS bit the owner TID in the user space
809                          * futex is 0.
810                          */
811                         if (!(curval & FUTEX_TID_MASK)) {
812                                 force_take = 1;
813                                 goto retry;
814                         }
815                 default:
816                         break;
817                 }
818         }
819
820         return ret;
821 }
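/*
 * Editorial example, not part of the original file: the userspace half of
 * the protocol that futex_lock_pi_atomic() implements. The futex word
 * holds 0 when free and the owner's TID when held; the kernel is only
 * entered when the uncontended cmpxchg fails. Minimal sketch, assuming
 * GCC/Clang atomic builtins and glibc's syscall(); error handling elided:
 */
#if 0	/* userspace illustration only */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void pi_lock(unsigned int *futex)
{
	unsigned int zero = 0;
	unsigned int tid = syscall(SYS_gettid);

	/* Fast path: 0 -> TID, the same cmpxchg the kernel retries above. */
	if (__atomic_compare_exchange_n(futex, &zero, tid, 0,
					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return;
	/* Contended: let the kernel queue us and boost the owner. */
	syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}

static void pi_unlock(unsigned int *futex)
{
	unsigned int tid = syscall(SYS_gettid);

	/* Fast path only succeeds if FUTEX_WAITERS is not set. */
	if (__atomic_compare_exchange_n(futex, &tid, 0, 0,
					__ATOMIC_RELEASE, __ATOMIC_RELAXED))
		return;
	syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}
#endif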
822
823 /**
824  * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
825  * @q:  The futex_q to unqueue
826  *
827  * The q->lock_ptr must not be NULL and must be held by the caller.
828  */
829 static void __unqueue_futex(struct futex_q *q)
830 {
831         struct futex_hash_bucket *hb;
832
833         if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
834             || WARN_ON(plist_node_empty(&q->list)))
835                 return;
836
837         hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
838         plist_del(&q->list, &hb->chain);
839 }
840
841 /*
842  * The hash bucket lock must be held when this is called.
843  * Afterwards, the futex_q must not be accessed.
844  */
845 static void wake_futex(struct futex_q *q)
846 {
847         struct task_struct *p = q->task;
848
849         if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
850                 return;
851
852         /*
853          * We set q->lock_ptr = NULL _before_ we wake up the task. If
854          * a non-futex wake up happens on another CPU then the task
855          * might exit and p would dereference a non-existing task
856          * struct. Prevent this by holding a reference on p across the
857          * wake up.
858          */
859         get_task_struct(p);
860
861         __unqueue_futex(q);
862         /*
863          * The waiting task can free the futex_q as soon as
864          * q->lock_ptr = NULL is written, without taking any locks. A
865          * memory barrier is required here to prevent the following
866          * store to lock_ptr from getting ahead of the plist_del.
867          */
868         smp_wmb();
869         q->lock_ptr = NULL;
870
871         wake_up_state(p, TASK_NORMAL);
872         put_task_struct(p);
873 }
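/*
 * Editorial note, not part of the original file: the smp_wmb() above
 * pairs with the lockless read of q->lock_ptr in unqueue_me(). The
 * waiter is free to reuse or free the futex_q the moment it observes
 * lock_ptr == NULL, so the plist_del() done by __unqueue_futex() must be
 * globally visible before the NULL store; otherwise the bucket chain
 * could still reference memory the waiter has already freed.
 */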
874
875 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
876 {
877         struct task_struct *new_owner;
878         struct futex_pi_state *pi_state = this->pi_state;
879         u32 uninitialized_var(curval), newval;
880
881         if (!pi_state)
882                 return -EINVAL;
883
884         /*
885          * If current does not own the pi_state then the futex is
886          * inconsistent and user space fiddled with the futex value.
887          */
888         if (pi_state->owner != current)
889                 return -EINVAL;
890
891         raw_spin_lock(&pi_state->pi_mutex.wait_lock);
892         new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
893
894         /*
895          * It is possible that the next waiter (the one that brought
896          * this owner to the kernel) timed out and is no longer
897          * waiting on the lock.
898          */
899         if (!new_owner)
900                 new_owner = this->task;
901
902         /*
903          * We pass it to the next owner. (The WAITERS bit is always
904          * kept enabled while there is PI state around. We must also
905          * preserve the owner died bit.)
906          */
907         if (!(uval & FUTEX_OWNER_DIED)) {
908                 int ret = 0;
909
910                 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
911
912                 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
913                         ret = -EFAULT;
914                 else if (curval != uval)
915                         ret = -EINVAL;
916                 if (ret) {
917                         raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
918                         return ret;
919                 }
920         }
921
922         raw_spin_lock_irq(&pi_state->owner->pi_lock);
923         WARN_ON(list_empty(&pi_state->list));
924         list_del_init(&pi_state->list);
925         raw_spin_unlock_irq(&pi_state->owner->pi_lock);
926
927         raw_spin_lock_irq(&new_owner->pi_lock);
928         WARN_ON(!list_empty(&pi_state->list));
929         list_add(&pi_state->list, &new_owner->pi_state_list);
930         pi_state->owner = new_owner;
931         raw_spin_unlock_irq(&new_owner->pi_lock);
932
933         raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
934         rt_mutex_unlock(&pi_state->pi_mutex);
935
936         return 0;
937 }
938
939 static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
940 {
941         u32 uninitialized_var(oldval);
942
943         /*
944          * There is no waiter, so we unlock the futex. The owner died
945          * bit need not be preserved here. We are the owner:
946          */
947         if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
948                 return -EFAULT;
949         if (oldval != uval)
950                 return -EAGAIN;
951
952         return 0;
953 }
954
955 /*
956  * Express the locking dependencies for lockdep:
957  */
958 static inline void
959 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
960 {
961         if (hb1 <= hb2) {
962                 spin_lock(&hb1->lock);
963                 if (hb1 < hb2)
964                         spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
965         } else { /* hb1 > hb2 */
966                 spin_lock(&hb2->lock);
967                 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
968         }
969 }
970
971 static inline void
972 double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
973 {
974         spin_unlock(&hb1->lock);
975         if (hb1 != hb2)
976                 spin_unlock(&hb2->lock);
977 }
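/*
 * Editorial note, not part of the original file: taking the two bucket
 * locks in address order keeps the lock graph acyclic. If one path did
 * lock(A); lock(B) while another did lock(B); lock(A), each could hold
 * one lock while spinning on the other forever; with the hb1 <= hb2
 * ordering above, every path reduces to lock(min); lock(max). When both
 * keys hash to the same bucket (hb1 == hb2), the lock is taken exactly
 * once, and spin_lock_nested() merely tells lockdep that the second
 * acquisition is a deliberate, ordered nesting.
 */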
978
979 /*
980  * Wake up waiters matching bitset queued on this futex (uaddr).
981  */
982 static int
983 futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
984 {
985         struct futex_hash_bucket *hb;
986         struct futex_q *this, *next;
987         union futex_key key = FUTEX_KEY_INIT;
988         int ret;
989
990         if (!bitset)
991                 return -EINVAL;
992
993         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
994         if (unlikely(ret != 0))
995                 goto out;
996
997         hb = hash_futex(&key);
998         spin_lock(&hb->lock);
999
1000         plist_for_each_entry_safe(this, next, &hb->chain, list) {
1001                 if (match_futex (&this->key, &key)) {
1002                         if (this->pi_state || this->rt_waiter) {
1003                                 ret = -EINVAL;
1004                                 break;
1005                         }
1006
1007                         /* Check if one of the bits is set in both bitsets */
1008                         if (!(this->bitset & bitset))
1009                                 continue;
1010
1011                         wake_futex(this);
1012                         if (++ret >= nr_wake)
1013                                 break;
1014                 }
1015         }
1016
1017         spin_unlock(&hb->lock);
1018         put_futex_key(&key);
1019 out:
1020         return ret;
1021 }
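/*
 * Editorial example, not part of the original file: the bitset variants
 * from userspace. FUTEX_WAIT_BITSET tags the waiter with a mask and
 * FUTEX_WAKE_BITSET wakes only waiters whose mask intersects the wake
 * mask; plain FUTEX_WAIT uses FUTEX_BITSET_MATCH_ANY (all ones). The
 * READERS/WRITERS split below is a made-up illustration. Sketch:
 */
#if 0	/* userspace illustration only */
#include <linux/futex.h>
#include <limits.h>
#include <sys/syscall.h>
#include <unistd.h>

#define READERS	0x1
#define WRITERS	0x2

/* Wait as a reader: only wakeups tagged with the READERS bit reach us. */
static long wait_reader(unsigned int *uaddr, unsigned int val)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAIT_BITSET, val,
		       NULL, NULL, READERS);
}

/* Wake up to INT_MAX writers without disturbing the readers. */
static long wake_writers(unsigned int *uaddr)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAKE_BITSET, INT_MAX,
		       NULL, NULL, WRITERS);
}
#endif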
1022
1023 /*
1024  * Wake up all waiters hashed on the physical page that is mapped
1025  * to this virtual address:
1026  */
1027 static int
1028 futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1029               int nr_wake, int nr_wake2, int op)
1030 {
1031         union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1032         struct futex_hash_bucket *hb1, *hb2;
1033         struct futex_q *this, *next;
1034         int ret, op_ret;
1035
1036 retry:
1037         ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1038         if (unlikely(ret != 0))
1039                 goto out;
1040         ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
1041         if (unlikely(ret != 0))
1042                 goto out_put_key1;
1043
1044         hb1 = hash_futex(&key1);
1045         hb2 = hash_futex(&key2);
1046
1047 retry_private:
1048         double_lock_hb(hb1, hb2);
1049         op_ret = futex_atomic_op_inuser(op, uaddr2);
1050         if (unlikely(op_ret < 0)) {
1051
1052                 double_unlock_hb(hb1, hb2);
1053
1054 #ifndef CONFIG_MMU
1055                 /*
1056                  * we don't get EFAULT from MMU faults if we don't have an MMU,
1057                  * but we might get them from range checking
1058                  */
1059                 ret = op_ret;
1060                 goto out_put_keys;
1061 #endif
1062
1063                 if (unlikely(op_ret != -EFAULT)) {
1064                         ret = op_ret;
1065                         goto out_put_keys;
1066                 }
1067
1068                 ret = fault_in_user_writeable(uaddr2);
1069                 if (ret)
1070                         goto out_put_keys;
1071
1072                 if (!(flags & FLAGS_SHARED))
1073                         goto retry_private;
1074
1075                 put_futex_key(&key2);
1076                 put_futex_key(&key1);
1077                 goto retry;
1078         }
1079
1080         plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1081                 if (match_futex (&this->key, &key1)) {
1082                         if (this->pi_state || this->rt_waiter) {
1083                                 ret = -EINVAL;
1084                                 goto out_unlock;
1085                         }
1086                         wake_futex(this);
1087                         if (++ret >= nr_wake)
1088                                 break;
1089                 }
1090         }
1091
1092         if (op_ret > 0) {
1093                 op_ret = 0;
1094                 plist_for_each_entry_safe(this, next, &hb2->chain, list) {
1095                         if (match_futex (&this->key, &key2)) {
1096                                 if (this->pi_state || this->rt_waiter) {
1097                                         ret = -EINVAL;
1098                                         goto out_unlock;
1099                                 }
1100                                 wake_futex(this);
1101                                 if (++op_ret >= nr_wake2)
1102                                         break;
1103                         }
1104                 }
1105                 ret += op_ret;
1106         }
1107
1108 out_unlock:
1109         double_unlock_hb(hb1, hb2);
1110 out_put_keys:
1111         put_futex_key(&key2);
1112 out_put_key1:
1113         put_futex_key(&key1);
1114 out:
1115         return ret;
1116 }
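/*
 * Editorial example, not part of the original file: FUTEX_WAKE_OP from
 * userspace. The op argument packs {op, oparg, cmp, cmparg}: the kernel
 * atomically applies op/oparg to *uaddr2, always wakes up to nr_wake
 * waiters on uaddr1, and additionally wakes nr_wake2 waiters on uaddr2
 * if the old value of *uaddr2 satisfies cmp/cmparg. Glibc historically
 * used this to combine "bump a counter and wake" in one syscall. Sketch:
 */
#if 0	/* userspace illustration only */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long wake_one_and_bump(unsigned int *uaddr1, unsigned int *uaddr2)
{
	/* Atomically *uaddr2 += 1; wake uaddr2 waiters if old value > 0. */
	int op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);

	return syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, 1,
		       1UL /* nr_wake2, passed in the timeout slot */,
		       uaddr2, op);
}
#endif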
1117
1118 /**
1119  * requeue_futex() - Requeue a futex_q from one hb to another
1120  * @q:          the futex_q to requeue
1121  * @hb1:        the source hash_bucket
1122  * @hb2:        the target hash_bucket
1123  * @key2:       the new key for the requeued futex_q
1124  */
1125 static inline
1126 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1127                    struct futex_hash_bucket *hb2, union futex_key *key2)
1128 {
1129
1130         /*
1131          * If key1 and key2 hash to the same bucket, no need to
1132          * requeue.
1133          */
1134         if (likely(&hb1->chain != &hb2->chain)) {
1135                 plist_del(&q->list, &hb1->chain);
1136                 plist_add(&q->list, &hb2->chain);
1137                 q->lock_ptr = &hb2->lock;
1138         }
1139         get_futex_key_refs(key2);
1140         q->key = *key2;
1141 }
1142
1143 /**
1144  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1145  * @q:          the futex_q
1146  * @key:        the key of the requeue target futex
1147  * @hb:         the hash_bucket of the requeue target futex
1148  *
1149  * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1150  * target futex if it is uncontended or via a lock steal.  Set the futex_q key
1151  * to the requeue target futex so the waiter can detect the wakeup on the right
1152  * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1153  * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
1154  * to protect access to the pi_state to fixup the owner later.  Must be called
1155  * with both q->lock_ptr and hb->lock held.
1156  */
1157 static inline
1158 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1159                            struct futex_hash_bucket *hb)
1160 {
1161         get_futex_key_refs(key);
1162         q->key = *key;
1163
1164         __unqueue_futex(q);
1165
1166         WARN_ON(!q->rt_waiter);
1167         q->rt_waiter = NULL;
1168
1169         q->lock_ptr = &hb->lock;
1170
1171         wake_up_state(q->task, TASK_NORMAL);
1172 }
1173
1174 /**
1175  * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1176  * @pifutex:            the user address of the to futex
1177  * @hb1:                the from futex hash bucket, must be locked by the caller
1178  * @hb2:                the to futex hash bucket, must be locked by the caller
1179  * @key1:               the from futex key
1180  * @key2:               the to futex key
1181  * @ps:                 address to store the pi_state pointer
1182  * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
1183  *
1184  * Try and get the lock on behalf of the top waiter if we can do it atomically.
1185  * Wake the top waiter if we succeed.  If the caller specified set_waiters,
1186  * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1187  * hb1 and hb2 must be held by the caller.
1188  *
1189  * Return:
1190  *  0 - failed to acquire the lock atomically;
1191  *  1 - acquired the lock;
1192  * <0 - error
1193  */
1194 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1195                                  struct futex_hash_bucket *hb1,
1196                                  struct futex_hash_bucket *hb2,
1197                                  union futex_key *key1, union futex_key *key2,
1198                                  struct futex_pi_state **ps, int set_waiters)
1199 {
1200         struct futex_q *top_waiter = NULL;
1201         u32 curval;
1202         int ret;
1203
1204         if (get_futex_value_locked(&curval, pifutex))
1205                 return -EFAULT;
1206
1207         /*
1208          * Find the top_waiter and determine if there are additional waiters.
1209          * If the caller intends to requeue more than 1 waiter to pifutex,
1210          * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1211          * as we have means to handle the possible fault.  If not, don't set
1212          * the bit unnecessarily as it will force the subsequent unlock to enter
1213          * the kernel.
1214          */
1215         top_waiter = futex_top_waiter(hb1, key1);
1216
1217         /* There are no waiters, nothing for us to do. */
1218         if (!top_waiter)
1219                 return 0;
1220
1221         /* Ensure we requeue to the expected futex. */
1222         if (!match_futex(top_waiter->requeue_pi_key, key2))
1223                 return -EINVAL;
1224
1225         /*
1226          * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
1227          * the contended case or if set_waiters is 1.  The pi_state is returned
1228          * in ps in contended cases.
1229          */
1230         ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1231                                    set_waiters);
1232         if (ret == 1)
1233                 requeue_pi_wake_futex(top_waiter, key2, hb2);
1234
1235         return ret;
1236 }
1237
1238 /**
1239  * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1240  * @uaddr1:     source futex user address
1241  * @flags:      futex flags (FLAGS_SHARED, etc.)
1242  * @uaddr2:     target futex user address
1243  * @nr_wake:    number of waiters to wake (must be 1 for requeue_pi)
1244  * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1245  * @cmpval:     @uaddr1 expected value (or %NULL)
1246  * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
1247  *              pi futex (pi to pi requeue is not supported)
1248  *
1249  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1250  * uaddr2 atomically on behalf of the top waiter.
1251  *
1252  * Return:
1253  * >=0 - on success, the number of tasks requeued or woken;
1254  *  <0 - on error
1255  */
1256 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1257                          u32 __user *uaddr2, int nr_wake, int nr_requeue,
1258                          u32 *cmpval, int requeue_pi)
1259 {
1260         union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1261         int drop_count = 0, task_count = 0, ret;
1262         struct futex_pi_state *pi_state = NULL;
1263         struct futex_hash_bucket *hb1, *hb2;
1264         struct futex_q *this, *next;
1265         u32 curval2;
1266
1267         if (requeue_pi) {
1268                 /*
1269                  * requeue_pi requires a pi_state, try to allocate it now
1270                  * without any locks in case it fails.
1271                  */
1272                 if (refill_pi_state_cache())
1273                         return -ENOMEM;
1274                 /*
1275                  * requeue_pi must wake as many tasks as it can, up to nr_wake
1276                  * + nr_requeue, since it acquires the rt_mutex prior to
1277                  * returning to userspace, so as to not leave the rt_mutex with
1278                  * waiters and no owner.  However, second and third wake-ups
1279                  * cannot be predicted as they involve race conditions with the
1280                  * first wake and a fault while looking up the pi_state.  Both
1281                  * pthread_cond_signal() and pthread_cond_broadcast() should
1282                  * use nr_wake=1.
1283                  */
1284                 if (nr_wake != 1)
1285                         return -EINVAL;
1286         }
1287
1288 retry:
1289         if (pi_state != NULL) {
1290                 /*
1291                  * We will have to lookup the pi_state again, so free this one
1292                  * to keep the accounting correct.
1293                  */
1294                 free_pi_state(pi_state);
1295                 pi_state = NULL;
1296         }
1297
1298         ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1299         if (unlikely(ret != 0))
1300                 goto out;
1301         ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1302                             requeue_pi ? VERIFY_WRITE : VERIFY_READ);
1303         if (unlikely(ret != 0))
1304                 goto out_put_key1;
1305
1306         hb1 = hash_futex(&key1);
1307         hb2 = hash_futex(&key2);
1308
1309 retry_private:
1310         double_lock_hb(hb1, hb2);
1311
1312         if (likely(cmpval != NULL)) {
1313                 u32 curval;
1314
1315                 ret = get_futex_value_locked(&curval, uaddr1);
1316
1317                 if (unlikely(ret)) {
1318                         double_unlock_hb(hb1, hb2);
1319
1320                         ret = get_user(curval, uaddr1);
1321                         if (ret)
1322                                 goto out_put_keys;
1323
1324                         if (!(flags & FLAGS_SHARED))
1325                                 goto retry_private;
1326
1327                         put_futex_key(&key2);
1328                         put_futex_key(&key1);
1329                         goto retry;
1330                 }
1331                 if (curval != *cmpval) {
1332                         ret = -EAGAIN;
1333                         goto out_unlock;
1334                 }
1335         }
1336
1337         if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1338                 /*
1339                  * Attempt to acquire uaddr2 and wake the top waiter. If we
1340                  * intend to requeue waiters, force setting the FUTEX_WAITERS
1341                  * bit.  We force this here where we are able to easily handle
1342                  * faults rather than in the requeue loop below.
1343                  */
1344                 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1345                                                  &key2, &pi_state, nr_requeue);
1346
1347                 /*
1348                  * At this point the top_waiter has either taken uaddr2 or is
1349                  * waiting on it.  If the former, then the pi_state will not
1350                  * exist yet, look it up one more time to ensure we have a
1351                  * reference to it.
1352                  */
1353                 if (ret == 1) {
1354                         WARN_ON(pi_state);
1355                         drop_count++;
1356                         task_count++;
1357                         ret = get_futex_value_locked(&curval2, uaddr2);
1358                         if (!ret)
1359                                 ret = lookup_pi_state(curval2, hb2, &key2,
1360                                                       &pi_state);
1361                 }
1362
1363                 switch (ret) {
1364                 case 0:
1365                         break;
1366                 case -EFAULT:
1367                         double_unlock_hb(hb1, hb2);
1368                         put_futex_key(&key2);
1369                         put_futex_key(&key1);
1370                         ret = fault_in_user_writeable(uaddr2);
1371                         if (!ret)
1372                                 goto retry;
1373                         goto out;
1374                 case -EAGAIN:
1375                         /* The owner was exiting, try again. */
1376                         double_unlock_hb(hb1, hb2);
1377                         put_futex_key(&key2);
1378                         put_futex_key(&key1);
1379                         cond_resched();
1380                         goto retry;
1381                 default:
1382                         goto out_unlock;
1383                 }
1384         }
1385
1386         plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1387                 if (task_count - nr_wake >= nr_requeue)
1388                         break;
1389
1390                 if (!match_futex(&this->key, &key1))
1391                         continue;
1392
1393                 /*
1394                  * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1395                  * be paired with each other and no other futex ops.
1396                  *
1397                  * We should never be requeueing a futex_q with a pi_state,
1398                  * which is awaiting a futex_unlock_pi().
1399                  */
1400                 if ((requeue_pi && !this->rt_waiter) ||
1401                     (!requeue_pi && this->rt_waiter) ||
1402                     this->pi_state) {
1403                         ret = -EINVAL;
1404                         break;
1405                 }
1406
1407                 /*
1408                  * Wake nr_wake waiters.  For requeue_pi, if we acquired the
1409                  * lock, we already woke the top_waiter.  If not, it will be
1410                  * woken by futex_unlock_pi().
1411                  */
1412                 if (++task_count <= nr_wake && !requeue_pi) {
1413                         wake_futex(this);
1414                         continue;
1415                 }
1416
1417                 /* Ensure we requeue to the expected futex for requeue_pi. */
1418                 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1419                         ret = -EINVAL;
1420                         break;
1421                 }
1422
1423                 /*
1424                  * Requeue nr_requeue waiters and possibly one more in the case
1425                  * of requeue_pi if we couldn't acquire the lock atomically.
1426                  */
1427                 if (requeue_pi) {
1428                         /* Prepare the waiter to take the rt_mutex. */
1429                         atomic_inc(&pi_state->refcount);
1430                         this->pi_state = pi_state;
1431                         ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1432                                                         this->rt_waiter,
1433                                                         this->task, 1);
1434                         if (ret == 1) {
1435                                 /* We got the lock. */
1436                                 requeue_pi_wake_futex(this, &key2, hb2);
1437                                 drop_count++;
1438                                 continue;
1439                         } else if (ret) {
1440                                 /* -EDEADLK */
1441                                 this->pi_state = NULL;
1442                                 free_pi_state(pi_state);
1443                                 goto out_unlock;
1444                         }
1445                 }
1446                 requeue_futex(this, hb1, hb2, &key2);
1447                 drop_count++;
1448         }
1449
1450 out_unlock:
1451         double_unlock_hb(hb1, hb2);
1452
1453         /*
1454          * drop_futex_key_refs() must be called outside the spinlocks. During
1455          * the requeue we moved futex_q's from the hash bucket at key1 to the
1456          * one at key2 and updated their key pointer.  We no longer need to
1457          * hold the references to key1.
1458          */
1459         while (--drop_count >= 0)
1460                 drop_futex_key_refs(&key1);
1461
1462 out_put_keys:
1463         put_futex_key(&key2);
1464 out_put_key1:
1465         put_futex_key(&key1);
1466 out:
1467         if (pi_state != NULL)
1468                 free_pi_state(pi_state);
1469         return ret ? ret : task_count;
1470 }
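/*
 * Editorial example, not part of the original file: the classic consumer
 * of futex_requeue() is pthread_cond_broadcast(): wake one waiter and
 * move the rest onto the mutex futex so they do not all stampede for the
 * mutex at once. FUTEX_CMP_REQUEUE supplies the expected value of the
 * condvar futex (cmpval above) to close a race with concurrent
 * signalers. Sketch:
 */
#if 0	/* userspace illustration only */
#include <linux/futex.h>
#include <limits.h>
#include <sys/syscall.h>
#include <unistd.h>

static long cond_broadcast(unsigned int *cond_futex,
			   unsigned int *mutex_futex, unsigned int cond_val)
{
	/* Wake 1 waiter, requeue up to INT_MAX others onto the mutex. */
	return syscall(SYS_futex, cond_futex, FUTEX_CMP_REQUEUE,
		       1, (unsigned long)INT_MAX /* nr_requeue */,
		       mutex_futex, cond_val);
}
#endif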
1471
1472 /* The key must be already stored in q->key. */
1473 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1474         __acquires(&hb->lock)
1475 {
1476         struct futex_hash_bucket *hb;
1477
1478         hb = hash_futex(&q->key);
1479         q->lock_ptr = &hb->lock;
1480
1481         spin_lock(&hb->lock);
1482         return hb;
1483 }
1484
1485 static inline void
1486 queue_unlock(struct futex_hash_bucket *hb)
1487         __releases(&hb->lock)
1488 {
1489         spin_unlock(&hb->lock);
1490 }
1491
1492 /**
1493  * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1494  * @q:  The futex_q to enqueue
1495  * @hb: The destination hash bucket
1496  *
1497  * The hb->lock must be held by the caller, and is released here. A call to
1498  * queue_me() is typically paired with exactly one call to unqueue_me().  The
1499  * exceptions involve the PI related operations, which may use unqueue_me_pi()
1500  * or nothing if the unqueue is done as part of the wake process and the unqueue
1501  * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
1502  * an example).
1503  */
1504 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1505         __releases(&hb->lock)
1506 {
1507         int prio;
1508
1509         /*
1510          * The priority used to register this element is
1511          * - either the real thread-priority for the real-time threads
1512          * (i.e. threads with a priority lower than MAX_RT_PRIO)
1513          * - or MAX_RT_PRIO for non-RT threads.
1514          * Thus, all RT-threads are woken first in priority order, and
1515          * the others are woken last, in FIFO order.
1516          */
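        /*
         * Worked example: a SCHED_FIFO task with rt_priority 50 has
         * normal_prio MAX_RT_PRIO-1-50 = 49 and is queued (and thus
         * woken) ahead of every SCHED_NORMAL task, all of which are
         * clamped to MAX_RT_PRIO here and therefore wake in FIFO
         * order among themselves.
         */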
1517         prio = min(current->normal_prio, MAX_RT_PRIO);
1518
1519         plist_node_init(&q->list, prio);
1520         plist_add(&q->list, &hb->chain);
1521         q->task = current;
1522         spin_unlock(&hb->lock);
1523 }
1524
1525 /**
1526  * unqueue_me() - Remove the futex_q from its futex_hash_bucket
1527  * @q:  The futex_q to unqueue
1528  *
1529  * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
1530  * be paired with exactly one earlier call to queue_me().
1531  *
1532  * Return:
1533  *   1 - if the futex_q was still queued (and we unqueued it);
1534  *   0 - if the futex_q was already removed by the waking thread
1535  */
1536 static int unqueue_me(struct futex_q *q)
1537 {
1538         spinlock_t *lock_ptr;
1539         int ret = 0;
1540
1541         /* In the common case we don't take the spinlock, which is nice. */
1542 retry:
1543         lock_ptr = q->lock_ptr;
1544         barrier();
1545         if (lock_ptr != NULL) {
1546                 spin_lock(lock_ptr);
1547                 /*
1548                  * q->lock_ptr can change between reading it and
1549                  * spin_lock(), causing us to take the wrong lock.  This
1550                  * corrects the race condition.
1551                  *
1552                  * Reasoning goes like this: if we have the wrong lock,
1553                  * q->lock_ptr must have changed (maybe several times)
1554                  * between reading it and the spin_lock().  It can
1555                  * change again after the spin_lock() but only if it was
1556                  * already changed before the spin_lock().  It cannot,
1557                  * however, change back to the original value.  Therefore
1558                  * we can detect whether we acquired the correct lock.
1559                  */
1560                 if (unlikely(lock_ptr != q->lock_ptr)) {
1561                         spin_unlock(lock_ptr);
1562                         goto retry;
1563                 }
1564                 __unqueue_futex(q);
1565
1566                 BUG_ON(q->pi_state);
1567
1568                 spin_unlock(lock_ptr);
1569                 ret = 1;
1570         }
1571
1572         drop_futex_key_refs(&q->key);
1573         return ret;
1574 }
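/*
 * A concrete interleaving of the race discussed above (illustrative
 * sketch; requeue_futex() changes q->lock_ptr while moving q between
 * buckets, and a wakeup sets it to NULL):
 *
 *	waiter (unqueue_me)		requeuer
 *	lock_ptr = q->lock_ptr;		// reads &hbA->lock
 *					lock hbA and hbB
 *					move q from hbA to hbB
 *					q->lock_ptr = &hbB->lock
 *					unlock hbA and hbB
 *	spin_lock(lock_ptr);		// locks hbA - the wrong bucket
 *	lock_ptr != q->lock_ptr		// mismatch, unlock and retry
 *
 * Since q->lock_ptr can never revert to a value we already compared
 * against, each retry makes progress and the loop exits holding the
 * lock that actually protects q.
 */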
1575
1576 /*
1577  * PI futexes cannot be requeued and must remove themselves from the
1578  * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1579  * and dropped here.
1580  */
1581 static void unqueue_me_pi(struct futex_q *q)
1582         __releases(q->lock_ptr)
1583 {
1584         __unqueue_futex(q);
1585
1586         BUG_ON(!q->pi_state);
1587         free_pi_state(q->pi_state);
1588         q->pi_state = NULL;
1589
1590         spin_unlock(q->lock_ptr);
1591 }
1592
1593 /*
1594  * Fixup the pi_state owner with the new owner.
1595  *
1596  * Must be called with the hash bucket lock held, and mm->sem held for
1597  * non-private futexes.
1598  */
1599 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1600                                 struct task_struct *newowner)
1601 {
1602         u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1603         struct futex_pi_state *pi_state = q->pi_state;
1604         struct task_struct *oldowner = pi_state->owner;
1605         u32 uval, uninitialized_var(curval), newval;
1606         int ret;
1607
1608         /* Owner died? */
1609         if (!pi_state->owner)
1610                 newtid |= FUTEX_OWNER_DIED;
1611
1612         /*
1613          * We are here either because we stole the rtmutex from the
1614          * previous highest priority waiter or we are the highest priority
1615          * waiter but failed to get the rtmutex the first time.
1616          * We have to replace the newowner TID in the user space variable.
1617          * This must be atomic as we have to preserve the owner died bit here.
1618          *
1619          * Note: We write the user space value _before_ changing the pi_state
1620          * because we can fault here. Imagine swapped out pages or a fork
1621          * that marked all the anonymous memory readonly for cow.
1622          *
1623          * Modifying pi_state _before_ the user space value would
1624          * leave the pi_state in an inconsistent state when we fault
1625          * here, because we need to drop the hash bucket lock to
1626          * handle the fault. This might be observed in the PID check
1627          * in lookup_pi_state.
1628          */
1629 retry:
1630         if (get_futex_value_locked(&uval, uaddr))
1631                 goto handle_fault;
1632
1633         while (1) {
1634                 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1635
1636                 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
1637                         goto handle_fault;
1638                 if (curval == uval)
1639                         break;
1640                 uval = curval;
1641         }
1642
1643         /*
1644          * We fixed up user space. Now we need to fix the pi_state
1645          * itself.
1646          */
1647         if (pi_state->owner != NULL) {
1648                 raw_spin_lock_irq(&pi_state->owner->pi_lock);
1649                 WARN_ON(list_empty(&pi_state->list));
1650                 list_del_init(&pi_state->list);
1651                 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1652         }
1653
1654         pi_state->owner = newowner;
1655
1656         raw_spin_lock_irq(&newowner->pi_lock);
1657         WARN_ON(!list_empty(&pi_state->list));
1658         list_add(&pi_state->list, &newowner->pi_state_list);
1659         raw_spin_unlock_irq(&newowner->pi_lock);
1660         return 0;
1661
1662         /*
1663          * To handle the page fault we need to drop the hash bucket
1664          * lock here. That gives the other task (either the highest priority
1665          * waiter itself or the task which stole the rtmutex) the
1666          * chance to try the fixup of the pi_state. So once we are
1667          * back from handling the fault we need to check the pi_state
1668          * after reacquiring the hash bucket lock and before trying to
1669          * do another fixup. When the fixup has been done already we
1670          * simply return.
1671          */
1672 handle_fault:
1673         spin_unlock(q->lock_ptr);
1674
1675         ret = fault_in_user_writeable(uaddr);
1676
1677         spin_lock(q->lock_ptr);
1678
1679         /*
1680          * Check if someone else fixed it for us:
1681          */
1682         if (pi_state->owner != oldowner)
1683                 return 0;
1684
1685         if (ret)
1686                 return ret;
1687
1688         goto retry;
1689 }
1690
1691 static long futex_wait_restart(struct restart_block *restart);
1692
1693 /**
1694  * fixup_owner() - Post lock pi_state and corner case management
1695  * @uaddr:      user address of the futex
1696  * @q:          futex_q (contains pi_state and access to the rt_mutex)
1697  * @locked:     if the attempt to take the rt_mutex succeeded (1) or not (0)
1698  *
1699  * After attempting to lock an rt_mutex, this function is called to cleanup
1700  * the pi_state owner as well as handle race conditions that may allow us to
1701  * acquire the lock. Must be called with the hb lock held.
1702  *
1703  * Return:
1704  *  1 - success, lock taken;
1705  *  0 - success, lock not taken;
1706  * <0 - on error (-EFAULT)
1707  */
1708 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
1709 {
1710         struct task_struct *owner;
1711         int ret = 0;
1712
1713         if (locked) {
1714                 /*
1715                  * Got the lock. We might not be the anticipated owner if we
1716                  * did a lock-steal - fix up the PI-state in that case:
1717                  */
1718                 if (q->pi_state->owner != current)
1719                         ret = fixup_pi_state_owner(uaddr, q, current);
1720                 goto out;
1721         }
1722
1723         /*
1724          * Catch the rare case, where the lock was released when we were on the
1725          * way back before we locked the hash bucket.
1726          */
1727         if (q->pi_state->owner == current) {
1728                 /*
1729                  * Try to get the rt_mutex now. This might fail as some other
1730                  * task acquired the rt_mutex after we removed ourself from the
1731                  * rt_mutex waiters list.
1732                  */
1733                 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
1734                         locked = 1;
1735                         goto out;
1736                 }
1737
1738                 /*
1739                  * pi_state is incorrect, some other task did a lock steal and
1740                  * we returned due to timeout or signal without taking the
1741                  * rt_mutex. Too late.
1742                  */
1743                 raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
1744                 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
1745                 if (!owner)
1746                         owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
1747                 raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
1748                 ret = fixup_pi_state_owner(uaddr, q, owner);
1749                 goto out;
1750         }
1751
1752         /*
1753          * Paranoia check. If we did not take the lock, then we should not be
1754          * the owner of the rt_mutex.
1755          */
1756         if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
1757                 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
1758                                 "pi-state %p\n", ret,
1759                                 q->pi_state->pi_mutex.owner,
1760                                 q->pi_state->owner);
1761
1762 out:
1763         return ret ? ret : locked;
1764 }
1765
1766 /**
1767  * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
1768  * @hb:         the futex hash bucket, must be locked by the caller
1769  * @q:          the futex_q to queue up on
1770  * @timeout:    the prepared hrtimer_sleeper, or null for no timeout
1771  */
1772 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
1773                                 struct hrtimer_sleeper *timeout)
1774 {
1775         /*
1776          * The task state is guaranteed to be set before another task can
1777          * wake it. set_current_state() is implemented using set_mb() and
1778          * queue_me() calls spin_unlock() upon completion, both serializing
1779          * access to the hash list and forcing another memory barrier.
1780          */
1781         set_current_state(TASK_INTERRUPTIBLE);
1782         queue_me(q, hb);
1783
1784         /* Arm the timer */
1785         if (timeout) {
1786                 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1787                 if (!hrtimer_active(&timeout->timer))
1788                         timeout->task = NULL;
1789         }
1790
1791         /*
1792          * If we have been removed from the hash list, then another task
1793          * has tried to wake us, and we can skip the call to schedule().
1794          */
1795         if (likely(!plist_node_empty(&q->list))) {
1796                 /*
1797                  * If the timer has already expired, current will already be
1798                  * flagged for rescheduling. Only call schedule if there
1799                  * is no timeout, or if it has yet to expire.
1800                  */
1801                 if (!timeout || timeout->task)
1802                         freezable_schedule();
1803         }
1804         __set_current_state(TASK_RUNNING);
1805 }
1806
1807 /**
1808  * futex_wait_setup() - Prepare to wait on a futex
1809  * @uaddr:      the futex userspace address
1810  * @val:        the expected value
1811  * @flags:      futex flags (FLAGS_SHARED, etc.)
1812  * @q:          the associated futex_q
1813  * @hb:         storage for hash_bucket pointer to be returned to caller
1814  *
1815  * Setup the futex_q and locate the hash_bucket.  Get the futex value and
1816  * compare it with the expected value.  Handle atomic faults internally.
1817  * Return with the hb lock held and a q.key reference on success, and unlocked
1818  * with no q.key reference on failure.
1819  *
1820  * Return:
1821  *  0 - uaddr contains val and hb has been locked;
1822  * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
1823  */
1824 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
1825                            struct futex_q *q, struct futex_hash_bucket **hb)
1826 {
1827         u32 uval;
1828         int ret;
1829
1830         /*
1831          * Access the page AFTER the hash-bucket is locked.
1832          * Order is important:
1833          *
1834          *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
1835          *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
1836          *
1837          * The basic logical guarantee of a futex is that it blocks ONLY
1838          * if cond(var) is known to be true at the time of blocking, for
1839          * any cond.  If we locked the hash-bucket after testing *uaddr, that
1840          * would open a race condition where we could block indefinitely with
1841          * cond(var) false, which would violate the guarantee.
1842          *
1843          * On the other hand, we insert q and release the hash-bucket only
1844          * after testing *uaddr.  This guarantees that futex_wait() will NOT
1845  * absorb a wakeup if *uaddr does not match the desired value
1846          * while the syscall executes.
1847          */
1848 retry:
1849         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
1850         if (unlikely(ret != 0))
1851                 return ret;
1852
1853 retry_private:
1854         *hb = queue_lock(q);
1855
1856         ret = get_futex_value_locked(&uval, uaddr);
1857
1858         if (ret) {
1859                 queue_unlock(*hb);
1860
1861                 ret = get_user(uval, uaddr);
1862                 if (ret)
1863                         goto out;
1864
1865                 if (!(flags & FLAGS_SHARED))
1866                         goto retry_private;
1867
1868                 put_futex_key(&q->key);
1869                 goto retry;
1870         }
1871
1872         if (uval != val) {
1873                 queue_unlock(*hb);
1874                 ret = -EWOULDBLOCK;
1875         }
1876
1877 out:
1878         if (ret)
1879                 put_futex_key(&q->key);
1880         return ret;
1881 }
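/*
 * A minimal userspace sketch of the waiter/waker protocol whose kernel
 * side is futex_wait_setup() (illustrative only, assuming Linux and
 * <linux/futex.h>; error and spurious-wakeup handling are omitted, and
 * waiter()/waker() are local example helpers, not kernel API):
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int futex_word;			// 'var' in the comment above
 *
 *	static void waiter(void)
 *	{
 *		int val = __atomic_load_n(&futex_word, __ATOMIC_SEQ_CST);
 *		if (val == 0)			// cond(val)
 *			syscall(SYS_futex, &futex_word, FUTEX_WAIT_PRIVATE,
 *				val, NULL, NULL, 0);
 *	}
 *
 *	static void waker(void)
 *	{
 *		__atomic_store_n(&futex_word, 1, __ATOMIC_SEQ_CST);
 *		syscall(SYS_futex, &futex_word, FUTEX_WAKE_PRIVATE,
 *			1, NULL, NULL, 0);
 *	}
 *
 * FUTEX_WAIT returns -1 with errno EWOULDBLOCK when futex_word no
 * longer contains val - precisely the in-kernel re-check performed by
 * futex_wait_setup() above.
 */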
1882
1883 static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
1884                       ktime_t *abs_time, u32 bitset)
1885 {
1886         struct hrtimer_sleeper timeout, *to = NULL;
1887         struct restart_block *restart;
1888         struct futex_hash_bucket *hb;
1889         struct futex_q q = futex_q_init;
1890         int ret;
1891
1892         if (!bitset)
1893                 return -EINVAL;
1894         q.bitset = bitset;
1895
1896         if (abs_time) {
1897                 to = &timeout;
1898
1899                 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
1900                                       CLOCK_REALTIME : CLOCK_MONOTONIC,
1901                                       HRTIMER_MODE_ABS);
1902                 hrtimer_init_sleeper(to, current);
1903                 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
1904                                              current->timer_slack_ns);
1905         }
1906
1907 retry:
1908         /*
1909          * Prepare to wait on uaddr. On success, holds hb lock and increments
1910          * q.key refs.
1911          */
1912         ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
1913         if (ret)
1914                 goto out;
1915
1916         /* queue_me and wait for wakeup, timeout, or a signal. */
1917         futex_wait_queue_me(hb, &q, to);
1918
1919         /* If we were woken (and unqueued), we succeeded, whatever. */
1920         ret = 0;
1921         /* unqueue_me() drops q.key ref */
1922         if (!unqueue_me(&q))
1923                 goto out;
1924         ret = -ETIMEDOUT;
1925         if (to && !to->task)
1926                 goto out;
1927
1928         /*
1929          * We expect signal_pending(current), but we might be the
1930          * victim of a spurious wakeup as well.
1931          */
1932         if (!signal_pending(current))
1933                 goto retry;
1934
1935         ret = -ERESTARTSYS;
1936         if (!abs_time)
1937                 goto out;
1938
1939         restart = &current_thread_info()->restart_block;
1940         restart->fn = futex_wait_restart;
1941         restart->futex.uaddr = uaddr;
1942         restart->futex.val = val;
1943         restart->futex.time = abs_time->tv64;
1944         restart->futex.bitset = bitset;
1945         restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
1946
1947         ret = -ERESTART_RESTARTBLOCK;
1948
1949 out:
1950         if (to) {
1951                 hrtimer_cancel(&to->timer);
1952                 destroy_hrtimer_on_stack(&to->timer);
1953         }
1954         return ret;
1955 }
1956
1957
1958 static long futex_wait_restart(struct restart_block *restart)
1959 {
1960         u32 __user *uaddr = restart->futex.uaddr;
1961         ktime_t t, *tp = NULL;
1962
1963         if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
1964                 t.tv64 = restart->futex.time;
1965                 tp = &t;
1966         }
1967         restart->fn = do_no_restart_syscall;
1968
1969         return (long)futex_wait(uaddr, restart->futex.flags,
1970                                 restart->futex.val, tp, restart->futex.bitset);
1971 }
1972
1973
1974 /*
1975  * Userspace tried a 0 -> TID atomic transition of the futex value
1976  * and failed. The kernel side here does the whole locking operation:
1977  * if there are waiters then it will block, it does PI, etc. (Due to
1978  * races the kernel might see a 0 value of the futex too.)
1979  */
1980 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
1981                          ktime_t *time, int trylock)
1982 {
1983         struct hrtimer_sleeper timeout, *to = NULL;
1984         struct futex_hash_bucket *hb;
1985         struct futex_q q = futex_q_init;
1986         int res, ret;
1987
1988         if (refill_pi_state_cache())
1989                 return -ENOMEM;
1990
1991         if (time) {
1992                 to = &timeout;
1993                 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
1994                                       HRTIMER_MODE_ABS);
1995                 hrtimer_init_sleeper(to, current);
1996                 hrtimer_set_expires(&to->timer, *time);
1997         }
1998
1999 retry:
2000         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
2001         if (unlikely(ret != 0))
2002                 goto out;
2003
2004 retry_private:
2005         hb = queue_lock(&q);
2006
2007         ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2008         if (unlikely(ret)) {
2009                 switch (ret) {
2010                 case 1:
2011                         /* We got the lock. */
2012                         ret = 0;
2013                         goto out_unlock_put_key;
2014                 case -EFAULT:
2015                         goto uaddr_faulted;
2016                 case -EAGAIN:
2017                         /*
2018                          * Task is exiting and we just wait for the
2019                          * exit to complete.
2020                          */
2021                         queue_unlock(hb);
2022                         put_futex_key(&q.key);
2023                         cond_resched();
2024                         goto retry;
2025                 default:
2026                         goto out_unlock_put_key;
2027                 }
2028         }
2029
2030         /*
2031          * Only actually queue now that the atomic ops are done:
2032          */
2033         queue_me(&q, hb);
2034
2035         WARN_ON(!q.pi_state);
2036         /*
2037          * Block on the PI mutex:
2038          */
2039         if (!trylock)
2040                 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
2041         else {
2042                 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
2043                 /* Fixup the trylock return value: */
2044                 ret = ret ? 0 : -EWOULDBLOCK;
2045         }
2046
2047         spin_lock(q.lock_ptr);
2048         /*
2049          * Fixup the pi_state owner and possibly acquire the lock if we
2050          * haven't already.
2051          */
2052         res = fixup_owner(uaddr, &q, !ret);
2053         /*
2054          * If fixup_owner() returned an error, propagate that.  If it acquired
2055          * the lock, clear our -ETIMEDOUT or -EINTR.
2056          */
2057         if (res)
2058                 ret = (res < 0) ? res : 0;
2059
2060         /*
2061          * If fixup_owner() faulted and was unable to handle the fault, unlock
2062          * it and return the fault to userspace.
2063          */
2064         if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2065                 rt_mutex_unlock(&q.pi_state->pi_mutex);
2066
2067         /* Unqueue and drop the lock */
2068         unqueue_me_pi(&q);
2069
2070         goto out_put_key;
2071
2072 out_unlock_put_key:
2073         queue_unlock(hb);
2074
2075 out_put_key:
2076         put_futex_key(&q.key);
2077 out:
2078         if (to)
2079                 destroy_hrtimer_on_stack(&to->timer);
2080         return ret != -EINTR ? ret : -ERESTARTNOINTR;
2081
2082 uaddr_faulted:
2083         queue_unlock(hb);
2084
2085         ret = fault_in_user_writeable(uaddr);
2086         if (ret)
2087                 goto out_put_key;
2088
2089         if (!(flags & FLAGS_SHARED))
2090                 goto retry_private;
2091
2092         put_futex_key(&q.key);
2093         goto retry;
2094 }
2095
2096 /*
2097  * Userspace attempted a TID -> 0 atomic transition, and failed.
2098  * This is the in-kernel slowpath: we look up the PI state (if any),
2099  * and do the rt-mutex unlock.
2100  */
2101 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2102 {
2103         struct futex_hash_bucket *hb;
2104         struct futex_q *this, *next;
2105         union futex_key key = FUTEX_KEY_INIT;
2106         u32 uval, vpid = task_pid_vnr(current);
2107         int ret;
2108
2109 retry:
2110         if (get_user(uval, uaddr))
2111                 return -EFAULT;
2112         /*
2113          * We release only a lock we actually own:
2114          */
2115         if ((uval & FUTEX_TID_MASK) != vpid)
2116                 return -EPERM;
2117
2118         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
2119         if (unlikely(ret != 0))
2120                 goto out;
2121
2122         hb = hash_futex(&key);
2123         spin_lock(&hb->lock);
2124
2125         /*
2126          * To avoid races, try to do the TID -> 0 atomic transition
2127          * again. If it succeeds then we can return without waking
2128          * anyone else up:
2129          */
2130         if (!(uval & FUTEX_OWNER_DIED) &&
2131             cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
2132                 goto pi_faulted;
2133         /*
2134          * Rare case: we managed to release the lock atomically,
2135          * no need to wake anyone else up:
2136          */
2137         if (unlikely(uval == vpid))
2138                 goto out_unlock;
2139
2140         /*
2141          * Ok, other tasks may need to be woken up - check waiters
2142          * and do the wakeup if necessary:
2143          */
2144         plist_for_each_entry_safe(this, next, &hb->chain, list) {
2145                 if (!match_futex(&this->key, &key))
2146                         continue;
2147                 ret = wake_futex_pi(uaddr, uval, this);
2148                 /*
2149                  * The atomic access to the futex value
2150                  * generated a pagefault, so retry the
2151                  * user-access and the wakeup:
2152                  */
2153                 if (ret == -EFAULT)
2154                         goto pi_faulted;
2155                 goto out_unlock;
2156         }
2157         /*
2158          * No waiters - kernel unlocks the futex:
2159          */
2160         if (!(uval & FUTEX_OWNER_DIED)) {
2161                 ret = unlock_futex_pi(uaddr, uval);
2162                 if (ret == -EFAULT)
2163                         goto pi_faulted;
2164         }
2165
2166 out_unlock:
2167         spin_unlock(&hb->lock);
2168         put_futex_key(&key);
2169
2170 out:
2171         return ret;
2172
2173 pi_faulted:
2174         spin_unlock(&hb->lock);
2175         put_futex_key(&key);
2176
2177         ret = fault_in_user_writeable(uaddr);
2178         if (!ret)
2179                 goto retry;
2180
2181         return ret;
2182 }
2183
2184 /**
2185  * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2186  * @hb:         the hash_bucket futex_q was originally enqueued on
2187  * @q:          the futex_q woken while waiting to be requeued
2188  * @key2:       the futex_key of the requeue target futex
2189  * @timeout:    the timeout associated with the wait (NULL if none)
2190  *
2191  * Detect if the task was woken on the initial futex as opposed to the requeue
2192  * target futex.  If so, determine if it was a timeout or a signal that caused
2193  * the wakeup and return the appropriate error code to the caller.  Must be
2194  * called with the hb lock held.
2195  *
2196  * Return:
2197  *  0 - no early wakeup detected;
2198  * <0 - -ETIMEDOUT, -ERESTARTNOINTR, or -EWOULDBLOCK (spurious wakeup)
2199  */
2200 static inline
2201 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2202                                    struct futex_q *q, union futex_key *key2,
2203                                    struct hrtimer_sleeper *timeout)
2204 {
2205         int ret = 0;
2206
2207         /*
2208          * With the hb lock held, we avoid races while we process the wakeup.
2209          * We only need to hold hb (and not hb2) to ensure atomicity as the
2210          * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2211          * It can't be requeued from uaddr2 to something else since we don't
2212          * support a PI aware source futex for requeue.
2213          */
2214         if (!match_futex(&q->key, key2)) {
2215                 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2216                 /*
2217                  * We were woken prior to requeue by a timeout or a signal.
2218                  * Unqueue the futex_q and determine which it was.
2219                  */
2220                 plist_del(&q->list, &hb->chain);
2221
2222                 /* Handle spurious wakeups gracefully */
2223                 ret = -EWOULDBLOCK;
2224                 if (timeout && !timeout->task)
2225                         ret = -ETIMEDOUT;
2226                 else if (signal_pending(current))
2227                         ret = -ERESTARTNOINTR;
2228         }
2229         return ret;
2230 }
2231
2232 /**
2233  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2234  * @uaddr:      the futex we initially wait on (non-pi)
2235  * @flags:      futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
2236  *              the same type, no requeueing from private to shared, etc.
2237  * @val:        the expected value of uaddr
2238  * @abs_time:   absolute timeout
2239  * @bitset:     32 bit wakeup bitset set by userspace, defaults to all
2240  * @uaddr2:     the pi futex we will take prior to returning to user-space
2241  *
2242  * The caller will wait on uaddr and will be requeued by futex_requeue() to
2243  * uaddr2, which must be PI aware and distinct from uaddr.  Normal wakeup will wake
2244  * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
2245  * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
2246  * without one, the pi logic would not know which task to boost/deboost, if
2247  * there was a need to.
2248  *
2249  * We call schedule() in futex_wait_queue_me() when we enqueue and return
2250  * there via one of the following:
2251  * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2252  * 2) wakeup on uaddr2 after a requeue
2253  * 3) signal
2254  * 4) timeout
2255  *
2256  * If 3, cleanup and return -ERESTARTNOINTR.
2257  *
2258  * If 2, we may then block on trying to take the rt_mutex and return via:
2259  * 5) successful lock
2260  * 6) signal
2261  * 7) timeout
2262  * 8) other lock acquisition failure
2263  *
2264  * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2265  *
2266  * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2267  *
2268  * Return:
2269  *  0 - On success;
2270  * <0 - On error
2271  */
2272 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2273                                  u32 val, ktime_t *abs_time, u32 bitset,
2274                                  u32 __user *uaddr2)
2275 {
2276         struct hrtimer_sleeper timeout, *to = NULL;
2277         struct rt_mutex_waiter rt_waiter;
2278         struct rt_mutex *pi_mutex = NULL;
2279         struct futex_hash_bucket *hb;
2280         union futex_key key2 = FUTEX_KEY_INIT;
2281         struct futex_q q = futex_q_init;
2282         int res, ret;
2283
2284         if (uaddr == uaddr2)
2285                 return -EINVAL;
2286
2287         if (!bitset)
2288                 return -EINVAL;
2289
2290         if (abs_time) {
2291                 to = &timeout;
2292                 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2293                                       CLOCK_REALTIME : CLOCK_MONOTONIC,
2294                                       HRTIMER_MODE_ABS);
2295                 hrtimer_init_sleeper(to, current);
2296                 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2297                                              current->timer_slack_ns);
2298         }
2299
2300         /*
2301          * The waiter is allocated on our stack, manipulated by the requeue
2302          * code while we sleep on uaddr.
2303          */
2304         debug_rt_mutex_init_waiter(&rt_waiter);
2305         rt_waiter.task = NULL;
2306
2307         ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
2308         if (unlikely(ret != 0))
2309                 goto out;
2310
2311         q.bitset = bitset;
2312         q.rt_waiter = &rt_waiter;
2313         q.requeue_pi_key = &key2;
2314
2315         /*
2316          * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2317          * count.
2318          */
2319         ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2320         if (ret)
2321                 goto out_key2;
2322
2323         /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2324         futex_wait_queue_me(hb, &q, to);
2325
2326         spin_lock(&hb->lock);
2327         ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2328         spin_unlock(&hb->lock);
2329         if (ret)
2330                 goto out_put_keys;
2331
2332         /*
2333          * In order for us to be here, we know our q.key == key2, and since
2334          * we took the hb->lock above, we also know that futex_requeue() has
2335          * completed and we no longer have to concern ourselves with a wakeup
2336          * race with the atomic proxy lock acquisition by the requeue code. The
2337          * futex_requeue() dropped our key1 reference and incremented our key2
2338          * reference count.
2339          */
2340
2341         /* Check if the requeue code acquired the second futex for us. */
2342         if (!q.rt_waiter) {
2343                 /*
2344                  * Got the lock. We might not be the anticipated owner if we
2345                  * did a lock-steal - fix up the PI-state in that case.
2346                  */
2347                 if (q.pi_state && (q.pi_state->owner != current)) {
2348                         spin_lock(q.lock_ptr);
2349                         ret = fixup_pi_state_owner(uaddr2, &q, current);
2350                         spin_unlock(q.lock_ptr);
2351                 }
2352         } else {
2353                 /*
2354                  * We have been woken up by futex_unlock_pi(), a timeout, or a
2355                  * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
2356                  * the pi_state.
2357                  */
2358                 WARN_ON(!q.pi_state);
2359                 pi_mutex = &q.pi_state->pi_mutex;
2360                 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2361                 debug_rt_mutex_free_waiter(&rt_waiter);
2362
2363                 spin_lock(q.lock_ptr);
2364                 /*
2365                  * Fixup the pi_state owner and possibly acquire the lock if we
2366                  * haven't already.
2367                  */
2368                 res = fixup_owner(uaddr2, &q, !ret);
2369                 /*
2370                  * If fixup_owner() returned an error, propagate that.  If it
2371                  * acquired the lock, clear -ETIMEDOUT or -EINTR.
2372                  */
2373                 if (res)
2374                         ret = (res < 0) ? res : 0;
2375
2376                 /* Unqueue and drop the lock. */
2377                 unqueue_me_pi(&q);
2378         }
2379
2380         /*
2381          * If fixup_pi_state_owner() faulted and was unable to handle the
2382          * fault, unlock the rt_mutex and return the fault to userspace.
2383          */
2384         if (ret == -EFAULT) {
2385                 if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
2386                         rt_mutex_unlock(pi_mutex);
2387         } else if (ret == -EINTR) {
2388                 /*
2389                  * We've already been requeued, but cannot restart by calling
2390                  * futex_lock_pi() directly. We could restart this syscall, but
2391                  * it would detect that the user space "val" changed and return
2392                  * -EWOULDBLOCK.  Save the overhead of the restart and return
2393                  * -EWOULDBLOCK directly.
2394                  */
2395                 ret = -EWOULDBLOCK;
2396         }
2397
2398 out_put_keys:
2399         put_futex_key(&q.key);
2400 out_key2:
2401         put_futex_key(&key2);
2402
2403 out:
2404         if (to) {
2405                 hrtimer_cancel(&to->timer);
2406                 destroy_hrtimer_on_stack(&to->timer);
2407         }
2408         return ret;
2409 }
2410
2411 /*
2412  * Support for robust futexes: the kernel cleans up held futexes at
2413  * thread exit time.
2414  *
2415  * Implementation: user-space maintains a per-thread list of locks it
2416  * is holding. Upon do_exit(), the kernel carefully walks this list,
2417  * and marks all locks that are owned by this thread with the
2418  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2419  * always manipulated with the lock held, so the list is private and
2420  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2421  * field, to allow the kernel to clean up if the thread dies after
2422  * acquiring the lock, but just before it could have added itself to
2423  * the list. There can only be one such pending lock.
2424  */
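/*
 * A minimal sketch of the userspace half (illustrative only; in
 * practice glibc registers the list in each thread's startup path, and
 * pthread mutexes created with PTHREAD_MUTEX_ROBUST use this machinery
 * transparently):
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static struct robust_list_head head = {
 *		.list		 = { &head.list },	// empty circular list
 *		.futex_offset	 = 0,	// offset of lock word in an entry
 *		.list_op_pending = NULL,
 *	};
 *
 *	// Called once per thread; the kernel walks 'head' in do_exit().
 *	static void register_robust_list(void)
 *	{
 *		syscall(SYS_set_robust_list, &head, sizeof(head));
 *	}
 *
 * Before acquiring a robust lock the thread stores the entry's address
 * in list_op_pending, links the entry into the list once the lock is
 * held, and then clears list_op_pending, so a death at any point in
 * between is covered.
 */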
2425
2426 /**
2427  * sys_set_robust_list() - Set the robust-futex list head of a task
2428  * @head:       pointer to the list-head
2429  * @len:        length of the list-head, as userspace expects
2430  */
2431 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2432                 size_t, len)
2433 {
2434         if (!futex_cmpxchg_enabled)
2435                 return -ENOSYS;
2436         /*
2437          * The kernel knows only one size for now:
2438          */
2439         if (unlikely(len != sizeof(*head)))
2440                 return -EINVAL;
2441
2442         current->robust_list = head;
2443
2444         return 0;
2445 }
2446
2447 /**
2448  * sys_get_robust_list() - Get the robust-futex list head of a task
2449  * @pid:        pid of the process [zero for current task]
2450  * @head_ptr:   pointer to a list-head pointer, the kernel fills it in
2451  * @len_ptr:    pointer to a length field, the kernel fills in the header size
2452  */
2453 SYSCALL_DEFINE3(get_robust_list, int, pid,
2454                 struct robust_list_head __user * __user *, head_ptr,
2455                 size_t __user *, len_ptr)
2456 {
2457         struct robust_list_head __user *head;
2458         unsigned long ret;
2459         struct task_struct *p;
2460
2461         if (!futex_cmpxchg_enabled)
2462                 return -ENOSYS;
2463
2464         rcu_read_lock();
2465
2466         ret = -ESRCH;
2467         if (!pid)
2468                 p = current;
2469         else {
2470                 p = find_task_by_vpid(pid);
2471                 if (!p)
2472                         goto err_unlock;
2473         }
2474
2475         ret = -EPERM;
2476         if (!ptrace_may_access(p, PTRACE_MODE_READ))
2477                 goto err_unlock;
2478
2479         head = p->robust_list;
2480         rcu_read_unlock();
2481
2482         if (put_user(sizeof(*head), len_ptr))
2483                 return -EFAULT;
2484         return put_user(head, head_ptr);
2485
2486 err_unlock:
2487         rcu_read_unlock();
2488
2489         return ret;
2490 }
2491
2492 /*
2493  * Process a futex-list entry, check whether it's owned by the
2494  * dying task, and do notification if so:
2495  */
2496 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2497 {
2498         u32 uval, uninitialized_var(nval), mval;
2499
2500 retry:
2501         if (get_user(uval, uaddr))
2502                 return -1;
2503
2504         if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
2505                 /*
2506                  * Ok, this dying thread is truly holding a futex
2507                  * of interest. Set the OWNER_DIED bit atomically
2508                  * via cmpxchg, and if the value had FUTEX_WAITERS
2509                  * set, wake up a waiter (if any). (We have to do a
2510                  * futex_wake() even if OWNER_DIED is already set -
2511                  * to handle the rare but possible case of recursive
2512                  * thread-death.) The rest of the cleanup is done in
2513                  * userspace.
2514                  */
2515                 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2516                 /*
2517                  * We are not holding a lock here, but we want to have
2518                  * the pagefault_disable/enable() protection because
2519                  * we want to handle the fault gracefully. If the
2520                  * access fails we try to fault in the futex with R/W
2521                  * verification via get_user_pages. get_user() above
2522                  * does not guarantee R/W access. If that fails we
2523                  * give up and leave the futex locked.
2524                  */
2525                 if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
2526                         if (fault_in_user_writeable(uaddr))
2527                                 return -1;
2528                         goto retry;
2529                 }
2530                 if (nval != uval)
2531                         goto retry;
2532
2533                 /*
2534                  * Wake robust non-PI futexes here. The wakeup of
2535                  * PI futexes happens in exit_pi_state():
2536                  */
2537                 if (!pi && (uval & FUTEX_WAITERS))
2538                         futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
2539         }
2540         return 0;
2541 }
2542
2543 /*
2544  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2545  */
2546 static inline int fetch_robust_entry(struct robust_list __user **entry,
2547                                      struct robust_list __user * __user *head,
2548                                      unsigned int *pi)
2549 {
2550         unsigned long uentry;
2551
2552         if (get_user(uentry, (unsigned long __user *)head))
2553                 return -EFAULT;
2554
2555         *entry = (void __user *)(uentry & ~1UL);
2556         *pi = uentry & 1;
2557
2558         return 0;
2559 }
2560
2561 /*
2562  * Walk curr->robust_list (very carefully, it's a userspace list!)
2563  * and mark any locks found there dead, and notify any waiters.
2564  *
2565  * We silently return on any sign of list-walking problem.
2566  */
2567 void exit_robust_list(struct task_struct *curr)
2568 {
2569         struct robust_list_head __user *head = curr->robust_list;
2570         struct robust_list __user *entry, *next_entry, *pending;
2571         unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
2572         unsigned int uninitialized_var(next_pi);
2573         unsigned long futex_offset;
2574         int rc;
2575
2576         if (!futex_cmpxchg_enabled)
2577                 return;
2578
2579         /*
2580          * Fetch the list head (which was registered earlier, via
2581          * sys_set_robust_list()):
2582          */
2583         if (fetch_robust_entry(&entry, &head->list.next, &pi))
2584                 return;
2585         /*
2586          * Fetch the relative futex offset:
2587          */
2588         if (get_user(futex_offset, &head->futex_offset))
2589                 return;
2590         /*
2591          * Fetch any possibly pending lock-add first, and handle it
2592          * if it exists:
2593          */
2594         if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
2595                 return;
2596
2597         next_entry = NULL;      /* avoid warning with gcc */
2598         while (entry != &head->list) {
2599                 /*
2600                  * Fetch the next entry in the list before calling
2601                  * handle_futex_death:
2602                  */
2603                 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
2604                 /*
2605                  * A pending lock might already be on the list, so
2606                  * don't process it twice:
2607                  */
2608                 if (entry != pending)
2609                         if (handle_futex_death((void __user *)entry + futex_offset,
2610                                                 curr, pi))
2611                                 return;
2612                 if (rc)
2613                         return;
2614                 entry = next_entry;
2615                 pi = next_pi;
2616                 /*
2617                  * Avoid excessively long or circular lists:
2618                  */
2619                 if (!--limit)
2620                         break;
2621
2622                 cond_resched();
2623         }
2624
2625         if (pending)
2626                 handle_futex_death((void __user *)pending + futex_offset,
2627                                    curr, pip);
2628 }
2629
2630 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2631                 u32 __user *uaddr2, u32 val2, u32 val3)
2632 {
2633         int cmd = op & FUTEX_CMD_MASK;
2634         unsigned int flags = 0;
2635
2636         if (!(op & FUTEX_PRIVATE_FLAG))
2637                 flags |= FLAGS_SHARED;
2638
2639         if (op & FUTEX_CLOCK_REALTIME) {
2640                 flags |= FLAGS_CLOCKRT;
2641                 if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
2642                         return -ENOSYS;
2643         }
2644
2645         switch (cmd) {
2646         case FUTEX_LOCK_PI:
2647         case FUTEX_UNLOCK_PI:
2648         case FUTEX_TRYLOCK_PI:
2649         case FUTEX_WAIT_REQUEUE_PI:
2650         case FUTEX_CMP_REQUEUE_PI:
2651                 if (!futex_cmpxchg_enabled)
2652                         return -ENOSYS;
2653         }
2654
2655         switch (cmd) {
2656         case FUTEX_WAIT:
2657                 val3 = FUTEX_BITSET_MATCH_ANY;
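		/* fall through */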
2658         case FUTEX_WAIT_BITSET:
2659                 return futex_wait(uaddr, flags, val, timeout, val3);
2660         case FUTEX_WAKE:
2661                 val3 = FUTEX_BITSET_MATCH_ANY;
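		/* fall through */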
2662         case FUTEX_WAKE_BITSET:
2663                 return futex_wake(uaddr, flags, val, val3);
2664         case FUTEX_REQUEUE:
2665                 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
2666         case FUTEX_CMP_REQUEUE:
2667                 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
2668         case FUTEX_WAKE_OP:
2669                 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
2670         case FUTEX_LOCK_PI:
2671                 return futex_lock_pi(uaddr, flags, val, timeout, 0);
2672         case FUTEX_UNLOCK_PI:
2673                 return futex_unlock_pi(uaddr, flags);
2674         case FUTEX_TRYLOCK_PI:
2675                 return futex_lock_pi(uaddr, flags, 0, timeout, 1);
2676         case FUTEX_WAIT_REQUEUE_PI:
2677                 val3 = FUTEX_BITSET_MATCH_ANY;
2678                 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
2679                                              uaddr2);
2680         case FUTEX_CMP_REQUEUE_PI:
2681                 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
2682         }
2683         return -ENOSYS;
2684 }
2685
2686
2687 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2688                 struct timespec __user *, utime, u32 __user *, uaddr2,
2689                 u32, val3)
2690 {
2691         struct timespec ts;
2692         ktime_t t, *tp = NULL;
2693         u32 val2 = 0;
2694         int cmd = op & FUTEX_CMD_MASK;
2695
2696         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
2697                       cmd == FUTEX_WAIT_BITSET ||
2698                       cmd == FUTEX_WAIT_REQUEUE_PI)) {
2699                 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
2700                         return -EFAULT;
2701                 if (!timespec_valid(&ts))
2702                         return -EINVAL;
2703
2704                 t = timespec_to_ktime(ts);
2705                 if (cmd == FUTEX_WAIT)
2706                         t = ktime_add_safe(ktime_get(), t);
2707                 tp = &t;
2708         }
2709         /*
2710          * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
2711          * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
2712          */
2713         if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2714             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
2715                 val2 = (u32) (unsigned long) utime;
2716
2717         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2718 }
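/*
 * Note the timeout semantics dispatched above: FUTEX_WAIT takes a
 * relative timeout (converted to an absolute deadline via
 * ktime_add_safe() before do_futex() is called), while FUTEX_WAIT_BITSET
 * and the PI/requeue variants interpret utime as an absolute time. A
 * minimal userspace sketch (illustrative only; glibc provides no futex()
 * wrapper, so raw syscall() is the usual idiom):
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	static int futex_wait_timeout(int *uaddr, int val, int seconds)
 *	{
 *		struct timespec rel = { .tv_sec = seconds };
 *
 *		// relative timeout; fails with EWOULDBLOCK if *uaddr != val
 *		return syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE,
 *			       val, &rel, NULL, 0);
 *	}
 */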
2719
2720 static int __init futex_init(void)
2721 {
2722         u32 curval;
2723         unsigned long i;
2724
2725 #if CONFIG_BASE_SMALL
2726         futex_hashsize = 16;
2727 #else
2728         futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
2729 #endif
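        /*
         * e.g. with 16 possible CPUs this computes
         * roundup_pow_of_two(256 * 16) = 4096 buckets, replacing the
         * old fixed-size global table of 256 entries.
         */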
2730
2731         futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
2732                                                futex_hashsize, 0,
2733                                                futex_hashsize < 256 ? HASH_SMALL : 0,
2734                                                NULL, NULL, futex_hashsize, futex_hashsize);
2735
2736         /*
2737          * This will fail and we want it. Some arch implementations do
2738          * runtime detection of the futex_atomic_cmpxchg_inatomic()
2739          * functionality. We want to know that before we call in any
2740          * of the complex code paths. Also we want to prevent
2741          * registration of robust lists in that case. NULL is
2742          * guaranteed to fault and we get -EFAULT on functional
2743          * implementation, the non-functional ones will return
2744          * -ENOSYS.
2745          */
2746         if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
2747                 futex_cmpxchg_enabled = 1;
2748
2749         for (i = 0; i < futex_hashsize; i++) {
2750                 plist_head_init(&futex_queues[i].chain);
2751                 spin_lock_init(&futex_queues[i].lock);
2752         }
2753
2754         return 0;
2755 }
2756 __initcall(futex_init);