staging: ion: Avoid using rt_mutexes directly
author	John Stultz <john.stultz@linaro.org>
Wed, 18 Dec 2013 01:04:29 +0000 (17:04 -0800)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 18 Dec 2013 01:16:51 +0000 (17:16 -0800)
RT_MUTEXES can be configured out of the kernel, causing compile
problems with ION.

To quote Colin:
"rt_mutexes were added with the deferred freeing feature.  Heaps need
to return zeroed memory to userspace, but zeroing the memory on every
allocation was causing performance issues.  We added a SCHED_IDLE
thread to zero memory in the background after freeing, but locking the
heap from the SCHED_IDLE thread might block a high priority allocation
thread for a long time.

The lock is only used to protect the heap's free_list and
free_list_size members, and is not held for any long or sleeping
operations.  Converting to a spinlock should prevent priority
inversion without using the rt_mutex.  I'd also rename it to free_lock
so it doesn't get used as a general heap lock."
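
The scope Colin describes is what makes the conversion safe: on a
non-PREEMPT_RT kernel, preemption is disabled while a spinlock is
held, so even the SCHED_IDLE free thread cannot be preempted inside
the critical section, and the hold time is bounded by a couple of
list and counter operations.  As a minimal sketch (matching the
patched ion_heap_freelist_add() in the diff below):

	spin_lock(&heap->free_lock);
	/* Only the free list and its size counter are touched here;
	 * nothing sleeps, so a spinlock is sufficient. */
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	spin_unlock(&heap->free_lock);
	/* Wake the deferred-free thread outside the lock. */
	wake_up(&heap->waitqueue);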

Thus this patch converts the rt_mutex usage to a spinlock and
renames the lock to free_lock to make its purpose clearer.

I also had to change the loop logic in ion_heap_freelist_drain():
since the spinlock, unlike the rt_mutex, cannot be held across the
potentially sleeping ion_buffer_destroy() call, the
list_for_each_entry_safe() walk is replaced with a loop that
re-fetches the list head each iteration, safely avoiding list
corruption.
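
Concretely, the reworked loop drops free_lock around each
ion_buffer_destroy() call and never trusts a cached next pointer
across the unlocked window.  A sketch of the resulting pattern
(mirroring the hunk below, with declarations elided):

	spin_lock(&heap->free_lock);
	while (!list_empty(&heap->free_list)) {
		if (total_drained >= size)
			break;
		/* Re-fetch the head each pass: the list may have been
		 * modified while the lock was dropped below. */
		buffer = list_first_entry(&heap->free_list,
					  struct ion_buffer, list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		total_drained += buffer->size;
		/* Destroying a buffer may sleep, so do it unlocked. */
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
		spin_lock(&heap->free_lock);
	}
	spin_unlock(&heap->free_lock);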

Acked-by: Colin Cross <ccross@android.com>
Cc: Android Kernel Team <kernel-team@android.com>
Reported-by: Jim Davis <jim.epost@gmail.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/android/ion/ion_heap.c
drivers/staging/android/ion/ion_priv.h

diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 9cf562224386ef245be0fc3d3460cbeda667f3a4..296c74f98dc08c6cabbd8203fc4b535b59aedf30 100644
@@ -160,10 +160,10 @@ int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
 
 void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
 {
-       rt_mutex_lock(&heap->lock);
+       spin_lock(&heap->free_lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
-       rt_mutex_unlock(&heap->lock);
+       spin_unlock(&heap->free_lock);
        wake_up(&heap->waitqueue);
 }
 
@@ -171,34 +171,38 @@ size_t ion_heap_freelist_size(struct ion_heap *heap)
 {
        size_t size;
 
-       rt_mutex_lock(&heap->lock);
+       spin_lock(&heap->free_lock);
        size = heap->free_list_size;
-       rt_mutex_unlock(&heap->lock);
+       spin_unlock(&heap->free_lock);
 
        return size;
 }
 
 size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
 {
-       struct ion_buffer *buffer, *tmp;
+       struct ion_buffer *buffer;
        size_t total_drained = 0;
 
        if (ion_heap_freelist_size(heap) == 0)
                return 0;
 
-       rt_mutex_lock(&heap->lock);
+       spin_lock(&heap->free_lock);
        if (size == 0)
                size = heap->free_list_size;
 
-       list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
+       while (!list_empty(&heap->free_list)) {
                if (total_drained >= size)
                        break;
+               buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+                                         list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                total_drained += buffer->size;
+               spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
+               spin_lock(&heap->free_lock);
        }
-       rt_mutex_unlock(&heap->lock);
+       spin_unlock(&heap->free_lock);
 
        return total_drained;
 }
@@ -213,16 +217,16 @@ static int ion_heap_deferred_free(void *data)
                wait_event_freezable(heap->waitqueue,
                                     ion_heap_freelist_size(heap) > 0);
 
-               rt_mutex_lock(&heap->lock);
+               spin_lock(&heap->free_lock);
                if (list_empty(&heap->free_list)) {
-                       rt_mutex_unlock(&heap->lock);
+                       spin_unlock(&heap->free_lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
-               rt_mutex_unlock(&heap->lock);
+               spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
        }
 
@@ -235,7 +239,7 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
 
        INIT_LIST_HEAD(&heap->free_list);
        heap->free_list_size = 0;
-       rt_mutex_init(&heap->lock);
+       spin_lock_init(&heap->free_lock);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                                 "%s", heap->name);
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index fc8a4c3cee5d53edb56b5702ee31c990e614660b..d98673981cc40cc83b70db1b5aa9a49a99b4ad35 100644
@@ -159,7 +159,7 @@ struct ion_heap {
        struct shrinker shrinker;
        struct list_head free_list;
        size_t free_list_size;
-       struct rt_mutex lock;
+       spinlock_t free_lock;
        wait_queue_head_t waitqueue;
        struct task_struct *task;
        int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);