mm: numa: limit scope of lock for NUMA migrate rate limiting
author     Mel Gorman <mgorman@suse.de>
           Tue, 21 Jan 2014 23:50:59 +0000 (15:50 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 22 Jan 2014 00:19:48 +0000 (16:19 -0800)
NUMA migrate rate limiting protects a migration counter and window using
a lock, but in some cases this lock can be contended.  It is not
critical that the number of pages be exact; lost updates are acceptable.
Limit the scope of the lock to the window reset and update the counter
without it.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Alex Thorlton <athorlton@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
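
To make the pattern concrete, below is a minimal user-space sketch of the
approach (not the kernel code): the spinlock only serializes resets of the
rate-limiting window, while the page counter is bumped without the lock and
may lose updates.  The names update_ratelimit(), window_end_ms and
nr_migrated, the pthread spinlock, the clock_gettime() timing and the
WINDOW_MSECS/RATELIMIT_PAGES values are all stand-ins for the pgdat fields,
jiffies and the tunables used in mm/migrate.c.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define WINDOW_MSECS    100UL   /* assumed rate-limit window */
#define RATELIMIT_PAGES 128UL   /* assumed per-window page budget */

static pthread_spinlock_t window_lock;
static unsigned long window_end_ms;     /* end of the current window */
static unsigned long nr_migrated;       /* pages counted this window */

static unsigned long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000UL + ts.tv_nsec / 1000000UL;
}

/* Return true if migrating nr more pages should be skipped. */
static bool update_ratelimit(unsigned long nr)
{
        if (now_ms() > window_end_ms) {
                /* Only the window reset takes the lock. */
                pthread_spin_lock(&window_lock);
                nr_migrated = 0;
                window_end_ms = now_ms() + WINDOW_MSECS;
                pthread_spin_unlock(&window_lock);
        }

        if (nr_migrated > RATELIMIT_PAGES)
                return true;

        /* Unlocked, non-atomic update: lost updates are tolerated. */
        nr_migrated += nr;
        return false;
}

int main(void)
{
        pthread_spin_init(&window_lock, PTHREAD_PROCESS_PRIVATE);

        for (int i = 0; i < 8; i++)
                printf("batch %d rate limited: %d\n", i,
                       update_ratelimit(32));

        pthread_spin_destroy(&window_lock);
        return 0;
}

Built with something like gcc -pthread, each window admits roughly
RATELIMIT_PAGES pages before callers start seeing "rate limited", give or
take the races the patch deliberately tolerates.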
include/linux/mmzone.h
mm/migrate.c

index 67ab5febabf7e3be9764b74b9cb4ef6c8be00260..5f2052c831547a33b66606d78ac5713086d285e3 100644 (file)
@@ -764,10 +764,7 @@ typedef struct pglist_data {
        int kswapd_max_order;
        enum zone_type classzone_idx;
 #ifdef CONFIG_NUMA_BALANCING
-       /*
-        * Lock serializing the per destination node AutoNUMA memory
-        * migration rate limiting data.
-        */
+       /* Lock serializing the migrate rate limiting window */
        spinlock_t numabalancing_migrate_lock;
 
        /* Rate limiting time interval */
index 41eba21f10ba038f3d7147071c05e8746826ec82..4612bb2e3677d015c839e48d6b9d96f9fb353f7e 100644 (file)
@@ -1602,26 +1602,29 @@ bool migrate_ratelimited(int node)
 static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
                                        unsigned long nr_pages)
 {
-       bool rate_limited = false;
-
        /*
         * Rate-limit the amount of data that is being migrated to a node.
         * Optimal placement is no good if the memory bus is saturated and
         * all the time is being spent migrating!
         */
-       spin_lock(&pgdat->numabalancing_migrate_lock);
        if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
+               spin_lock(&pgdat->numabalancing_migrate_lock);
                pgdat->numabalancing_migrate_nr_pages = 0;
                pgdat->numabalancing_migrate_next_window = jiffies +
                        msecs_to_jiffies(migrate_interval_millisecs);
+               spin_unlock(&pgdat->numabalancing_migrate_lock);
        }
        if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
-               rate_limited = true;
-       else
-               pgdat->numabalancing_migrate_nr_pages += nr_pages;
-       spin_unlock(&pgdat->numabalancing_migrate_lock);
-       
-       return rate_limited;
+               return true;
+
+       /*
+        * This is an unlocked non-atomic update so errors are possible.
+        * The consequences are failing to migrate when we potentially should
+        * have, which is not severe enough to warrant locking. If it is ever
+        * a problem, it can be converted to a per-cpu counter.
+        */
+       pgdat->numabalancing_migrate_nr_pages += nr_pages;
+       return false;
 }
 
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
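
The comment added above notes that the racy counter could be converted to a
per-cpu counter if lost updates ever become a problem.  A rough user-space
sketch of that idea follows (again, not kernel code): sched_getcpu() stands
in for the kernel's per-CPU accessors, and MAX_CPUS, RATELIMIT_PAGES and the
struct/function names are illustrative assumptions.

#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_CPUS        64      /* assumed upper bound on CPU ids */
#define RATELIMIT_PAGES 128UL   /* assumed per-window page budget */

/* One counter per CPU, padded to a cache line to avoid false sharing. */
struct cpu_slot {
        unsigned long pages;
        char pad[64 - sizeof(unsigned long)];
};

static struct cpu_slot migrated[MAX_CPUS];

/* Each CPU bumps only its own slot, so no lock is needed. */
static void count_migrated(unsigned long nr)
{
        int cpu = sched_getcpu();

        if (cpu >= 0 && cpu < MAX_CPUS)
                migrated[cpu].pages += nr;
}

/* Approximate sum across CPUs; staleness is fine for rate limiting. */
static bool over_limit(void)
{
        unsigned long total = 0;

        for (int cpu = 0; cpu < MAX_CPUS; cpu++)
                total += migrated[cpu].pages;
        return total > RATELIMIT_PAGES;
}

int main(void)
{
        for (int i = 0; i < 8; i++)
                count_migrated(32);
        printf("over limit: %d\n", over_limit());
        return 0;
}

Per-CPU slots trade a slightly stale sum on the read side for contention-free
updates on the write side, which is the same trade-off the rate limiter
already accepts.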