binder: switch alloc->mutex to spinlock_t
The alloc->mutex is a highly contended lock that causes performance issues on Android devices. When a low-priority task is given this lock and then sleeps while holding it, it can take a long time to be scheduled again and finish its work, which delays every other task that is also waiting on the mutex. The problem gets worse when there is memory pressure in the system, because this increases the contention on the alloc->mutex while the shrinker reclaims binder pages.

Switching to a spinlock keeps the waiters running and avoids the overhead of waking up tasks. This significantly improves transaction latency when the problematic scenario occurs.

The performance impact of this patchset was measured by stress-testing the binder alloc contention: several clients of different priorities send thousands of transactions of different sizes to a single server while, in parallel, pages are reclaimed through the shrinker's debugfs interface. The test was run on a Pixel 8, a Pixel 6 and a qemu machine; the results were similar on all three devices:

after:
| sched | prio | average | max       | min     |
|-------+------+---------+-----------+---------|
| fifo  |   99 | 0.135ms |   1.197ms | 0.022ms |
| fifo  |   01 | 0.136ms |   5.232ms | 0.018ms |
| other |  -20 | 0.180ms |   7.403ms | 0.019ms |
| other |   19 | 0.241ms |  58.094ms | 0.018ms |

before:
| sched | prio | average | max       | min     |
|-------+------+---------+-----------+---------|
| fifo  |   99 | 0.350ms | 248.730ms | 0.020ms |
| fifo  |   01 | 0.357ms | 248.817ms | 0.024ms |
| other |  -20 | 0.399ms | 249.906ms | 0.020ms |
| other |   19 | 0.477ms | 297.756ms | 0.022ms |

The key metrics above are the average and max latencies (wall time). These improvements should roughly translate to p95-p99 latencies on real workloads. The response time is up to 200x faster in these scenarios and there is no penalty in the regular path.

Note that this lock can only be converted after the series of changes made by the previous patches, mainly refactoring the sections that might_sleep() and changing the lock ordering with respect to the mmap_lock, among others.

Reviewed-by: Alice Ryhl <aliceryhl@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20231201172212.1813387-29-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 7710e2cca3
parent e50f4e6cc9
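To make the change concrete, here is a minimal, hypothetical sketch of the conversion pattern that the diff below applies throughout the allocator (the example_alloc and example_get_free_async_space names are illustrative stand-ins, not the driver's own types): a sleeping mutex critical section becomes a non-sleeping spinlock critical section. This is only safe because earlier patches in the series already moved the code that might_sleep() (page installation, mmap_lock ordering) out of these sections.

#include <linux/spinlock.h>

/* Illustrative stand-in for struct binder_alloc. */
struct example_alloc {
        spinlock_t lock;                /* was: struct mutex mutex; */
        size_t free_async_space;
};

static size_t example_get_free_async_space(struct example_alloc *alloc)
{
        size_t free_async_space;

        spin_lock(&alloc->lock);        /* was: mutex_lock(&alloc->mutex);   */
        free_async_space = alloc->free_async_space;
        spin_unlock(&alloc->lock);      /* was: mutex_unlock(&alloc->mutex); */

        return free_async_space;
}

The shrinker path follows the same idea with spin_trylock() replacing mutex_trylock(), and the error label is renamed from err_get_alloc_mutex_failed to err_get_alloc_lock_failed, as shown in binder_alloc_free_page() below.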
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -169,9 +169,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 {
         struct binder_buffer *buffer;
 
-        mutex_lock(&alloc->mutex);
+        spin_lock(&alloc->lock);
         buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
-        mutex_unlock(&alloc->mutex);
+        spin_unlock(&alloc->lock);
         return buffer;
 }
 
@@ -597,10 +597,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
         if (!next)
                 return ERR_PTR(-ENOMEM);
 
-        mutex_lock(&alloc->mutex);
+        spin_lock(&alloc->lock);
         buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
         if (IS_ERR(buffer)) {
-                mutex_unlock(&alloc->mutex);
+                spin_unlock(&alloc->lock);
                 goto out;
         }
 
@@ -608,7 +608,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
         buffer->offsets_size = offsets_size;
         buffer->extra_buffers_size = extra_buffers_size;
         buffer->pid = current->tgid;
-        mutex_unlock(&alloc->mutex);
+        spin_unlock(&alloc->lock);
 
         ret = binder_install_buffer_pages(alloc, buffer, size);
         if (ret) {
@@ -785,17 +785,17 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
          * We could eliminate the call to binder_alloc_clear_buf()
          * from binder_alloc_deferred_release() by moving this to
          * binder_free_buf_locked(). However, that could
-         * increase contention for the alloc mutex if clear_on_free
-         * is used frequently for large buffers. The mutex is not
+         * increase contention for the alloc->lock if clear_on_free
+         * is used frequently for large buffers. This lock is not
          * needed for correctness here.
          */
         if (buffer->clear_on_free) {
                 binder_alloc_clear_buf(alloc, buffer);
                 buffer->clear_on_free = false;
         }
-        mutex_lock(&alloc->mutex);
+        spin_lock(&alloc->lock);
         binder_free_buf_locked(alloc, buffer);
-        mutex_unlock(&alloc->mutex);
+        spin_unlock(&alloc->lock);
 }
 
 /**
@@ -893,7 +893,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
         struct binder_buffer *buffer;
 
         buffers = 0;
-        mutex_lock(&alloc->mutex);
+        spin_lock(&alloc->lock);
         BUG_ON(alloc->vma);
 
         while ((n = rb_first(&alloc->allocated_buffers))) {
@@ -943,7 +943,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
                 }
                 kfree(alloc->pages);
         }
-        mutex_unlock(&alloc->mutex);
+        spin_unlock(&alloc->lock);
         if (alloc->mm)
                 mmdrop(alloc->mm);
 
@@ -966,7 +966,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
         struct binder_buffer *buffer;
         struct rb_node *n;
 
-        mutex_lock(&alloc->mutex);
+        spin_lock(&alloc->lock);
         for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
                 buffer = rb_entry(n, struct binder_buffer, rb_node);
                 seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
@@ -976,7 +976,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
                            buffer->extra_buffers_size,
                            buffer->transaction ? "active" : "delivered");
         }
-        mutex_unlock(&alloc->mutex);
+        spin_unlock(&alloc->lock);
 }
 
 /**
@@ -993,7 +993,7 @@ void binder_alloc_print_pages(struct seq_file *m,
         int lru = 0;
         int free = 0;
 
-        mutex_lock(&alloc->mutex);
+        spin_lock(&alloc->lock);
         /*
          * Make sure the binder_alloc is fully initialized, otherwise we might
          * read inconsistent state.
@@ -1009,7 +1009,7 @@ void binder_alloc_print_pages(struct seq_file *m,
                                 lru++;
                 }
         }
-        mutex_unlock(&alloc->mutex);
+        spin_unlock(&alloc->lock);
         seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
         seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
 }
@@ -1025,10 +1025,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
         struct rb_node *n;
         int count = 0;
 
-        mutex_lock(&alloc->mutex);
+        spin_lock(&alloc->lock);
         for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                 count++;
-        mutex_unlock(&alloc->mutex);
+        spin_unlock(&alloc->lock);
         return count;
 }
 
@@ -1073,8 +1073,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
                 goto err_mmget;
         if (!mmap_read_trylock(mm))
                 goto err_mmap_read_lock_failed;
-        if (!mutex_trylock(&alloc->mutex))
-                goto err_get_alloc_mutex_failed;
+        if (!spin_trylock(&alloc->lock))
+                goto err_get_alloc_lock_failed;
         if (!page->page_ptr)
                 goto err_page_already_freed;
 
@@ -1093,7 +1093,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
         trace_binder_unmap_kernel_end(alloc, index);
 
         list_lru_isolate(lru, item);
-        mutex_unlock(&alloc->mutex);
+        spin_unlock(&alloc->lock);
         spin_unlock(lock);
 
         if (vma) {
@@ -1113,8 +1113,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 err_invalid_vma:
 err_page_already_freed:
-        mutex_unlock(&alloc->mutex);
-err_get_alloc_mutex_failed:
+        spin_unlock(&alloc->lock);
+err_get_alloc_lock_failed:
         mmap_read_unlock(mm);
 err_mmap_read_lock_failed:
         mmput_async(mm);
@@ -1149,7 +1149,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
         alloc->pid = current->group_leader->pid;
         alloc->mm = current->mm;
         mmgrab(alloc->mm);
-        mutex_init(&alloc->mutex);
+        spin_lock_init(&alloc->lock);
         INIT_LIST_HEAD(&alloc->buffers);
 }
 
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -9,7 +9,7 @@
 #include <linux/rbtree.h>
 #include <linux/list.h>
 #include <linux/mm.h>
-#include <linux/rtmutex.h>
+#include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/list_lru.h>
@@ -72,7 +72,7 @@ struct binder_lru_page {
 
 /**
  * struct binder_alloc - per-binder proc state for binder allocator
- * @mutex:              protects binder_alloc fields
+ * @lock:               protects binder_alloc fields
  * @vma:                vm_area_struct passed to mmap_handler
  *                      (invariant after mmap)
  * @mm:                 copy of task->mm (invariant after open)
@@ -96,7 +96,7 @@ struct binder_lru_page {
  *                      struct binder_buffer objects used to track the user buffers
  */
 struct binder_alloc {
-        struct mutex mutex;
+        spinlock_t lock;
         struct vm_area_struct *vma;
         struct mm_struct *mm;
         unsigned long buffer;
@@ -153,9 +153,9 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc)
 {
         size_t free_async_space;
 
-        mutex_lock(&alloc->mutex);
+        spin_lock(&alloc->lock);
         free_async_space = alloc->free_async_space;
-        mutex_unlock(&alloc->mutex);
+        spin_unlock(&alloc->lock);
         return free_async_space;
 }
 