binder: remove pid param in binder_alloc_new_buf()
author: Carlos Llamas <cmllamas@google.com>
Fri, 1 Dec 2023 17:21:41 +0000 (17:21 +0000)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 5 Dec 2023 00:23:39 +0000 (09:23 +0900)
Binder attributes the buffer allocation to the current->tgid every time.
There is no need to pass this as a parameter so drop it.

Also add a few touchups to follow the coding guidelines. No functional
changes are introduced in this patch.

Reviewed-by: Alice Ryhl <aliceryhl@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20231201172212.1813387-13-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/android/binder_alloc.h
drivers/android/binder_alloc_selftest.c

index 437d1097b118ebf00e603e2aca9e1c0f53129ea4..45674af6310fe84b31dfd68bad37814459447879 100644 (file)
@@ -3225,7 +3225,7 @@ static void binder_transaction(struct binder_proc *proc,
 
        t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
                tr->offsets_size, extra_buffers_size,
-               !reply && (t->flags & TF_ONE_WAY), current->tgid);
+               !reply && (t->flags & TF_ONE_WAY));
        if (IS_ERR(t->buffer)) {
                char *s;
 
index 40a2ca0c0dea8056b705fbba099b4f7f93c8191b..b5c3e56318e14af81a77f65631591695983079ad 100644 (file)
@@ -319,7 +319,7 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
        return smp_load_acquire(&alloc->vma);
 }
 
-static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+static bool debug_low_async_space_locked(struct binder_alloc *alloc)
 {
        /*
         * Find the amount and size of buffers allocated by the current caller;
@@ -328,10 +328,11 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
         * and at some point we'll catch them in the act. This is more efficient
         * than keeping a map per pid.
         */
-       struct rb_node *n;
        struct binder_buffer *buffer;
        size_t total_alloc_size = 0;
+       int pid = current->tgid;
        size_t num_buffers = 0;
+       struct rb_node *n;
 
        for (n = rb_first(&alloc->allocated_buffers); n != NULL;
                 n = rb_next(n)) {
@@ -364,8 +365,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
                                size_t size,
-                               int is_async,
-                               int pid)
+                               int is_async)
 {
        struct rb_node *n = alloc->free_buffers.rb_node;
        struct binder_buffer *buffer;
@@ -476,7 +476,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
                     "%d: binder_alloc_buf size %zd got %pK\n",
                      alloc->pid, size, buffer);
        buffer->async_transaction = is_async;
-       buffer->pid = pid;
        buffer->oneway_spam_suspect = false;
        if (is_async) {
                alloc->free_async_space -= size;
@@ -489,7 +488,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
                         * of async space left (which is less than 10% of total
                         * buffer size).
                         */
-                       buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
+                       buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc);
                } else {
                        alloc->oneway_spam_detected = false;
                }
@@ -532,7 +531,6 @@ static inline size_t sanitized_size(size_t data_size,
  * @offsets_size:       user specified buffer offset
  * @extra_buffers_size: size of extra space for meta-data (eg, security context)
  * @is_async:           buffer for async transaction
- * @pid:                               pid to attribute allocation to (used for debugging)
  *
  * Allocate a new buffer given the requested sizes. Returns
  * the kernel version of the buffer pointer. The size allocated
@@ -545,8 +543,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
-                                          int is_async,
-                                          int pid)
+                                          int is_async)
 {
        struct binder_buffer *buffer;
        size_t size;
@@ -569,7 +566,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
        }
 
        mutex_lock(&alloc->mutex);
-       buffer = binder_alloc_new_buf_locked(alloc, size, is_async, pid);
+       buffer = binder_alloc_new_buf_locked(alloc, size, is_async);
        if (IS_ERR(buffer)) {
                mutex_unlock(&alloc->mutex);
                goto out;
@@ -578,6 +575,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
        buffer->data_size = data_size;
        buffer->offsets_size = offsets_size;
        buffer->extra_buffers_size = extra_buffers_size;
+       buffer->pid = current->tgid;
        mutex_unlock(&alloc->mutex);
 
 out:
index cb19677a5c15390b54cefd408d5e89b9cb20079d..bbc16bc6d5ac98694562b588919e1854543aa043 100644 (file)
@@ -49,15 +49,13 @@ struct binder_buffer {
        unsigned async_transaction:1;
        unsigned oneway_spam_suspect:1;
        unsigned debug_id:27;
-
        struct binder_transaction *transaction;
-
        struct binder_node *target_node;
        size_t data_size;
        size_t offsets_size;
        size_t extra_buffers_size;
        unsigned long user_data;
-       int    pid;
+       int pid;
 };
 
 /**
@@ -125,8 +123,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
-                                          int is_async,
-                                          int pid);
+                                          int is_async);
 void binder_alloc_init(struct binder_alloc *alloc);
 int binder_alloc_shrinker_init(void);
 void binder_alloc_shrinker_exit(void);
index 341c73b4a807aab31d8df602aa1660f66c17c02c..ed753747e54ce6e6f8de8b1072e0c55cdc8e5adb 100644 (file)
@@ -119,7 +119,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
        int i;
 
        for (i = 0; i < BUFFER_NUM; i++) {
-               buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0);
+               buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
                if (IS_ERR(buffers[i]) ||
                    !check_buffer_pages_allocated(alloc, buffers[i],
                                                  sizes[i])) {