Pileus Git - ~andy/linux/commitdiff
Merge branch 'freezer'
authorRafael J. Wysocki <rafael.j.wysocki@intel.com>
Fri, 28 Jun 2013 11:00:53 +0000 (13:00 +0200)
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>
Fri, 28 Jun 2013 11:00:53 +0000 (13:00 +0200)
* freezer:
  af_unix: use freezable blocking calls in read
  sigtimedwait: use freezable blocking call
  nanosleep: use freezable blocking call
  futex: use freezable blocking call
  select: use freezable blocking call
  epoll: use freezable blocking call
  binder: use freezable blocking calls
  freezer: add new freezable helpers using freezer_do_not_count()
  freezer: convert freezable helpers to static inline where possible
  freezer: convert freezable helpers to freezer_do_not_count()
  freezer: skip waking up tasks with PF_FREEZER_SKIP set
  freezer: shorten freezer sleep time using exponential backoff
  lockdep: check that no locks held at freeze time
  lockdep: remove task argument from debug_check_no_locks_held
  freezer: add unsafe versions of freezable helpers for CIFS
  freezer: add unsafe versions of freezable helpers for NFS

18 files changed:
drivers/staging/android/binder.c
fs/cifs/transport.c
fs/eventpoll.c
fs/nfs/inode.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4proc.c
fs/select.c
include/linux/debug_locks.h
include/linux/freezer.h
kernel/exit.c
kernel/freezer.c
kernel/futex.c
kernel/hrtimer.c
kernel/lockdep.c
kernel/power/process.c
kernel/signal.c
net/sunrpc/sched.c
net/unix/af_unix.c

index 1567ac296b393163225237c830c48a726e529b4e..1ffc2ebdf612df7afbf90371e7ba4a4b226131b3 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/cacheflush.h>
 #include <linux/fdtable.h>
 #include <linux/file.h>
+#include <linux/freezer.h>
 #include <linux/fs.h>
 #include <linux/list.h>
 #include <linux/miscdevice.h>
@@ -2140,13 +2141,13 @@ retry:
                        if (!binder_has_proc_work(proc, thread))
                                ret = -EAGAIN;
                } else
-                       ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+                       ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
        } else {
                if (non_block) {
                        if (!binder_has_thread_work(thread))
                                ret = -EAGAIN;
                } else
-                       ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
+                       ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
        }
 
        binder_lock(__func__);
index bfbf4700d160f8a4d54f7cc0446a35c4de8c747d..b70aa7c913940c3766263b67e564a053883f9f32 100644 (file)
@@ -447,7 +447,7 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 {
        int error;
 
-       error = wait_event_freezekillable(server->response_q,
+       error = wait_event_freezekillable_unsafe(server->response_q,
                                    midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;
index deecc7294a672c3fa64cf3884c4ee28f4c7f80a7..0cff4434880d1fff3b799e6c1e2df6328b7b5282 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/mutex.h>
 #include <linux/anon_inodes.h>
 #include <linux/device.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/mman.h>
@@ -1602,7 +1603,8 @@ fetch_events:
                        }
 
                        spin_unlock_irqrestore(&ep->lock, flags);
-                       if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+                       if (!freezable_schedule_hrtimeout_range(to, slack,
+                                                               HRTIMER_MODE_ABS))
                                timed_out = 1;
 
                        spin_lock_irqsave(&ep->lock, flags);
index c1c7a9d78722257867846f39c74780536b28d0c5..ce727047ee87786bcf4d20d8573a24a0990e2e62 100644 (file)
@@ -79,7 +79,7 @@ int nfs_wait_bit_killable(void *word)
 {
        if (fatal_signal_pending(current))
                return -ERESTARTSYS;
-       freezable_schedule();
+       freezable_schedule_unsafe();
        return 0;
 }
 EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
index 43ea96ced28cbc763a60048ab95cefcf737ecf40..ce90eb4775c2f888547bd98911062f8eede4af97 100644 (file)
@@ -33,7 +33,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
                res = rpc_call_sync(clnt, msg, flags);
                if (res != -EJUKEBOX)
                        break;
-               freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+               freezable_schedule_timeout_killable_unsafe(NFS_JUKEBOX_RETRY_TIME);
                res = -ERESTARTSYS;
        } while (!fatal_signal_pending(current));
        return res;
index d7ba5616989c49fe52d396d41187a663744598f1..28241a42f363581341b3ed81de137fc0da1b428e 100644 (file)
@@ -268,7 +268,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
                *timeout = NFS4_POLL_RETRY_MIN;
        if (*timeout > NFS4_POLL_RETRY_MAX)
                *timeout = NFS4_POLL_RETRY_MAX;
-       freezable_schedule_timeout_killable(*timeout);
+       freezable_schedule_timeout_killable_unsafe(*timeout);
        if (fatal_signal_pending(current))
                res = -ERESTARTSYS;
        *timeout <<= 1;
@@ -4528,7 +4528,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-       freezable_schedule_timeout_killable(timeout);
+       freezable_schedule_timeout_killable_unsafe(timeout);
        timeout <<= 1;
        if (timeout > NFS4_LOCK_MAXTIMEOUT)
                return NFS4_LOCK_MAXTIMEOUT;
index 8c1c96c27062a504bfc151f33d335fcd8f6779b5..6b14dc7df3a46df3293da538abc2d65b3b67f94f 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/rcupdate.h>
 #include <linux/hrtimer.h>
 #include <linux/sched/rt.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
@@ -236,7 +237,8 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
 
        set_current_state(state);
        if (!pwq->triggered)
-               rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
+               rc = freezable_schedule_hrtimeout_range(expires, slack,
+                                                       HRTIMER_MODE_ABS);
        __set_current_state(TASK_RUNNING);
 
        /*
index 21ca773f77bf7fd2543b111371279c4ef749722d..822c1354f3a69dae20e7f6cacaf340b40ea9d7c4 100644 (file)
@@ -51,7 +51,7 @@ struct task_struct;
 extern void debug_show_all_locks(void);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void debug_check_no_locks_held(struct task_struct *task);
+extern void debug_check_no_locks_held(void);
 #else
 static inline void debug_show_all_locks(void)
 {
@@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len)
 }
 
 static inline void
-debug_check_no_locks_held(struct task_struct *task)
+debug_check_no_locks_held(void)
 {
 }
 #endif
index e70df40d84f6fe83c72f732aa44b454148661b6e..7fd81b8c48971676cd52362f09722dfd2dd28153 100644 (file)
@@ -3,6 +3,7 @@
 #ifndef FREEZER_H_INCLUDED
 #define FREEZER_H_INCLUDED
 
+#include <linux/debug_locks.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/atomic.h>
@@ -46,7 +47,11 @@ extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
 extern void thaw_kernel_threads(void);
 
-static inline bool try_to_freeze(void)
+/*
+ * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
+ * If try_to_freeze causes a lockdep warning it means the caller may deadlock
+ */
+static inline bool try_to_freeze_unsafe(void)
 {
        might_sleep();
        if (likely(!freezing(current)))
@@ -54,6 +59,13 @@ static inline bool try_to_freeze(void)
        return __refrigerator(false);
 }
 
+static inline bool try_to_freeze(void)
+{
+       if (!(current->flags & PF_NOFREEZE))
+               debug_check_no_locks_held();
+       return try_to_freeze_unsafe();
+}
+
 extern bool freeze_task(struct task_struct *p);
 extern bool set_freezable(void);
 
@@ -115,6 +127,14 @@ static inline void freezer_count(void)
        try_to_freeze();
 }
 
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline void freezer_count_unsafe(void)
+{
+       current->flags &= ~PF_FREEZER_SKIP;
+       smp_mb();
+       try_to_freeze_unsafe();
+}
+
 /**
  * freezer_should_skip - whether to skip a task when determining frozen
  *                      state is reached
@@ -139,28 +159,86 @@ static inline bool freezer_should_skip(struct task_struct *p)
 }
 
 /*
- * These macros are intended to be used whenever you want allow a sleeping
+ * These functions are intended to be used whenever you want to allow a sleeping
  * task to be frozen. Note that neither return any clear indication of
  * whether a freeze event happened while in this function.
  */
 
 /* Like schedule(), but should not block the freezer. */
-#define freezable_schedule()                                           \
-({                                                                     \
-       freezer_do_not_count();                                         \
-       schedule();                                                     \
-       freezer_count();                                                \
-})
+static inline void freezable_schedule(void)
+{
+       freezer_do_not_count();
+       schedule();
+       freezer_count();
+}
+
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline void freezable_schedule_unsafe(void)
+{
+       freezer_do_not_count();
+       schedule();
+       freezer_count_unsafe();
+}
+
+/*
+ * Like schedule_timeout(), but should not block the freezer.  Do not
+ * call this with locks held.
+ */
+static inline long freezable_schedule_timeout(long timeout)
+{
+       long __retval;
+       freezer_do_not_count();
+       __retval = schedule_timeout(timeout);
+       freezer_count();
+       return __retval;
+}
+
+/*
+ * Like schedule_timeout_interruptible(), but should not block the freezer.  Do not
+ * call this with locks held.
+ */
+static inline long freezable_schedule_timeout_interruptible(long timeout)
+{
+       long __retval;
+       freezer_do_not_count();
+       __retval = schedule_timeout_interruptible(timeout);
+       freezer_count();
+       return __retval;
+}
 
 /* Like schedule_timeout_killable(), but should not block the freezer. */
-#define freezable_schedule_timeout_killable(timeout)                   \
-({                                                                     \
-       long __retval;                                                  \
-       freezer_do_not_count();                                         \
-       __retval = schedule_timeout_killable(timeout);                  \
-       freezer_count();                                                \
-       __retval;                                                       \
-})
+static inline long freezable_schedule_timeout_killable(long timeout)
+{
+       long __retval;
+       freezer_do_not_count();
+       __retval = schedule_timeout_killable(timeout);
+       freezer_count();
+       return __retval;
+}
+
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
+{
+       long __retval;
+       freezer_do_not_count();
+       __retval = schedule_timeout_killable(timeout);
+       freezer_count_unsafe();
+       return __retval;
+}
+
+/*
+ * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
+ * call this with locks held.
+ */
+static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
+               unsigned long delta, const enum hrtimer_mode mode)
+{
+       int __retval;
+       freezer_do_not_count();
+       __retval = schedule_hrtimeout_range(expires, delta, mode);
+       freezer_count();
+       return __retval;
+}
 
 /*
  * Freezer-friendly wrappers around wait_event_interruptible(),
@@ -177,33 +255,45 @@ static inline bool freezer_should_skip(struct task_struct *p)
        __retval;                                                       \
 })
 
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+#define wait_event_freezekillable_unsafe(wq, condition)                        \
+({                                                                     \
+       int __retval;                                                   \
+       freezer_do_not_count();                                         \
+       __retval = wait_event_killable(wq, (condition));                \
+       freezer_count_unsafe();                                         \
+       __retval;                                                       \
+})
+
 #define wait_event_freezable(wq, condition)                            \
 ({                                                                     \
        int __retval;                                                   \
-       for (;;) {                                                      \
-               __retval = wait_event_interruptible(wq,                 \
-                               (condition) || freezing(current));      \
-               if (__retval || (condition))                            \
-                       break;                                          \
-               try_to_freeze();                                        \
-       }                                                               \
+       freezer_do_not_count();                                         \
+       __retval = wait_event_interruptible(wq, (condition));           \
+       freezer_count();                                                \
        __retval;                                                       \
 })
 
 #define wait_event_freezable_timeout(wq, condition, timeout)           \
 ({                                                                     \
        long __retval = timeout;                                        \
-       for (;;) {                                                      \
-               __retval = wait_event_interruptible_timeout(wq,         \
-                               (condition) || freezing(current),       \
-                               __retval);                              \
-               if (__retval <= 0 || (condition))                       \
-                       break;                                          \
-               try_to_freeze();                                        \
-       }                                                               \
+       freezer_do_not_count();                                         \
+       __retval = wait_event_interruptible_timeout(wq, (condition),    \
+                               __retval);                              \
+       freezer_count();                                                \
        __retval;                                                       \
 })
 
+#define wait_event_freezable_exclusive(wq, condition)                  \
+({                                                                     \
+       int __retval;                                                   \
+       freezer_do_not_count();                                         \
+       __retval = wait_event_interruptible_exclusive(wq, condition);   \
+       freezer_count();                                                \
+       __retval;                                                       \
+})
+
+
 #else /* !CONFIG_FREEZER */
 static inline bool frozen(struct task_struct *p) { return false; }
 static inline bool freezing(struct task_struct *p) { return false; }
@@ -225,18 +315,37 @@ static inline void set_freezable(void) {}
 
 #define freezable_schedule()  schedule()
 
+#define freezable_schedule_unsafe()  schedule()
+
+#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)
+
+#define freezable_schedule_timeout_interruptible(timeout)              \
+       schedule_timeout_interruptible(timeout)
+
 #define freezable_schedule_timeout_killable(timeout)                   \
        schedule_timeout_killable(timeout)
 
+#define freezable_schedule_timeout_killable_unsafe(timeout)            \
+       schedule_timeout_killable(timeout)
+
+#define freezable_schedule_hrtimeout_range(expires, delta, mode)       \
+       schedule_hrtimeout_range(expires, delta, mode)
+
 #define wait_event_freezable(wq, condition)                            \
                wait_event_interruptible(wq, condition)
 
 #define wait_event_freezable_timeout(wq, condition, timeout)           \
                wait_event_interruptible_timeout(wq, condition, timeout)
 
+#define wait_event_freezable_exclusive(wq, condition)                  \
+               wait_event_interruptible_exclusive(wq, condition)
+
 #define wait_event_freezekillable(wq, condition)               \
                wait_event_killable(wq, condition)
 
+#define wait_event_freezekillable_unsafe(wq, condition)                        \
+               wait_event_killable(wq, condition)
+
 #endif /* !CONFIG_FREEZER */
 
 #endif /* FREEZER_H_INCLUDED */
index 7bb73f9d09dbeedcc6c07f6a8dc8257f76fc4963..6a057750ebbbdee49b9c2c2b578567401fd8540f 100644 (file)
@@ -835,7 +835,7 @@ void do_exit(long code)
        /*
         * Make sure we are holding no locks:
         */
-       debug_check_no_locks_held(tsk);
+       debug_check_no_locks_held();
        /*
         * We can do this unlocked here. The futex code uses this flag
         * just to verify whether the pi state cleanup has been done
index c38893b0efbaa39d770b11546dba9e97ec53ff51..8b2afc1c9df0c698eccd86664d25c33ed647fea7 100644 (file)
@@ -110,6 +110,18 @@ bool freeze_task(struct task_struct *p)
 {
        unsigned long flags;
 
+       /*
+        * This check can race with freezer_do_not_count, but worst case that
+        * will result in an extra wakeup being sent to the task.  It does not
+        * race with freezer_count(), the barriers in freezer_count() and
+        * freezer_should_skip() ensure that either freezer_count() sees
+        * freezing == true in try_to_freeze() and freezes, or
+        * freezer_should_skip() sees !PF_FREEZER_SKIP and freezes the task
+        * normally.
+        */
+       if (freezer_should_skip(p))
+               return false;
+
        spin_lock_irqsave(&freezer_lock, flags);
        if (!freezing(p) || frozen(p)) {
                spin_unlock_irqrestore(&freezer_lock, flags);
index b26dcfc02c9489b3ca00bfd076f2208bb4964366..d710fae8abbe933b97267b69ff2e27864d1291a4 100644 (file)
@@ -61,6 +61,7 @@
 #include <linux/nsproxy.h>
 #include <linux/ptrace.h>
 #include <linux/sched/rt.h>
+#include <linux/freezer.h>
 
 #include <asm/futex.h>
 
@@ -1807,7 +1808,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
                 * is no timeout, or if it has yet to expire.
                 */
                if (!timeout || timeout->task)
-                       schedule();
+                       freezable_schedule();
        }
        __set_current_state(TASK_RUNNING);
 }
index fd4b13b131f8db23fb17055caf5d33e23bec7b50..3ee4d06c6fc20bda08fa052a1c42d70c7d094cb8 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
 #include <linux/timer.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
@@ -1545,7 +1546,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
                        t->task = NULL;
 
                if (likely(t->task))
-                       schedule();
+                       freezable_schedule();
 
                hrtimer_cancel(&t->timer);
                mode = HRTIMER_MODE_ABS;
index 1f3186b37fd5390be1534f895eb413de19e0a6d7..e16c45b9ee77054f80becd37a51da00776f796c1 100644 (file)
@@ -4090,7 +4090,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
-static void print_held_locks_bug(struct task_struct *curr)
+static void print_held_locks_bug(void)
 {
        if (!debug_locks_off())
                return;
@@ -4099,22 +4099,21 @@ static void print_held_locks_bug(struct task_struct *curr)
 
        printk("\n");
        printk("=====================================\n");
-       printk("[ BUG: lock held at task exit time! ]\n");
+       printk("[ BUG: %s/%d still has locks held! ]\n",
+              current->comm, task_pid_nr(current));
        print_kernel_ident();
        printk("-------------------------------------\n");
-       printk("%s/%d is exiting with locks still held!\n",
-               curr->comm, task_pid_nr(curr));
-       lockdep_print_held_locks(curr);
-
+       lockdep_print_held_locks(current);
        printk("\nstack backtrace:\n");
        dump_stack();
 }
 
-void debug_check_no_locks_held(struct task_struct *task)
+void debug_check_no_locks_held(void)
 {
-       if (unlikely(task->lockdep_depth > 0))
-               print_held_locks_bug(task);
+       if (unlikely(current->lockdep_depth > 0))
+               print_held_locks_bug();
 }
+EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
 
 void debug_show_all_locks(void)
 {
index 98088e0e71e83a3b9cd157c5415f9e79083592c0..fc0df84864495f8c44261961cb6909e66d24a21e 100644 (file)
@@ -30,9 +30,10 @@ static int try_to_freeze_tasks(bool user_only)
        unsigned int todo;
        bool wq_busy = false;
        struct timeval start, end;
-       u64 elapsed_csecs64;
-       unsigned int elapsed_csecs;
+       u64 elapsed_msecs64;
+       unsigned int elapsed_msecs;
        bool wakeup = false;
+       int sleep_usecs = USEC_PER_MSEC;
 
        do_gettimeofday(&start);
 
@@ -68,22 +69,25 @@ static int try_to_freeze_tasks(bool user_only)
 
                /*
                 * We need to retry, but first give the freezing tasks some
-                * time to enter the refrigerator.
+                * time to enter the refrigerator.  Start with an initial
+                * 1 ms sleep followed by exponential backoff until 8 ms.
                 */
-               msleep(10);
+               usleep_range(sleep_usecs / 2, sleep_usecs);
+               if (sleep_usecs < 8 * USEC_PER_MSEC)
+                       sleep_usecs *= 2;
        }
 
        do_gettimeofday(&end);
-       elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
-       do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
-       elapsed_csecs = elapsed_csecs64;
+       elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
+       do_div(elapsed_msecs64, NSEC_PER_MSEC);
+       elapsed_msecs = elapsed_msecs64;
 
        if (todo) {
                printk("\n");
-               printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
+               printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
                       "(%d tasks refusing to freeze, wq_busy=%d):\n",
                       wakeup ? "aborted" : "failed",
-                      elapsed_csecs / 100, elapsed_csecs % 100,
+                      elapsed_msecs / 1000, elapsed_msecs % 1000,
                       todo - wq_busy, wq_busy);
 
                if (!wakeup) {
@@ -96,8 +100,8 @@ static int try_to_freeze_tasks(bool user_only)
                        read_unlock(&tasklist_lock);
                }
        } else {
-               printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100,
-                       elapsed_csecs % 100);
+               printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
+                       elapsed_msecs % 1000);
        }
 
        return todo ? -EBUSY : 0;
index 113411bfe8b1205ad0f26556776f2a317e06eb0c..50e41075ac77105fd26d190b2068929af6c4becc 100644 (file)
@@ -2848,7 +2848,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
                recalc_sigpending();
                spin_unlock_irq(&tsk->sighand->siglock);
 
-               timeout = schedule_timeout_interruptible(timeout);
+               timeout = freezable_schedule_timeout_interruptible(timeout);
 
                spin_lock_irq(&tsk->sighand->siglock);
                __set_task_blocked(tsk, &tsk->real_blocked);
index 5356b120dbf8e2fe61ba88081d1fab1941f135a9..77d251e0259315eeef4acd4de50def4382bba5e1 100644 (file)
@@ -254,7 +254,7 @@ static int rpc_wait_bit_killable(void *word)
 {
        if (fatal_signal_pending(current))
                return -ERESTARTSYS;
-       freezable_schedule();
+       freezable_schedule_unsafe();
        return 0;
 }
 
index 826e09938bff4203def6e5af33db49c4360f3019..c4ce243824bb19c6e600dd98da2d8a6d16747dc1 100644 (file)
 #include <linux/mount.h>
 #include <net/checksum.h>
 #include <linux/security.h>
+#include <linux/freezer.h>
 
 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
 EXPORT_SYMBOL_GPL(unix_socket_table);
@@ -1879,7 +1880,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
 
                set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
                unix_state_unlock(sk);
-               timeo = schedule_timeout(timeo);
+               timeo = freezable_schedule_timeout(timeo);
                unix_state_lock(sk);
                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        }