@@ ... @@
 		trace_binder_alloc_page_start(alloc, index);
-		page->alloc = alloc;
-		INIT_LIST_HEAD(&page->lru);
-
 		ret = binder_install_single_page(alloc, page, page_addr);
 		if (ret) {
 			binder_free_page_range(alloc, start, page_addr);
@@ ... @@
 int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 			      struct vm_area_struct *vma)
 {
-	int ret;
-	const char *failure_string;
 	struct binder_buffer *buffer;
+	const char *failure_string;
+	int ret, i;
 
 	if (unlikely(vma->vm_mm != alloc->mm)) {
 		ret = -EINVAL;
@@ ... @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 		goto err_alloc_pages_failed;
 	}
 
+	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+		alloc->pages[i].alloc = alloc;
+		INIT_LIST_HEAD(&alloc->pages[i].lru);
+	}
+
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 	if (!buffer) {
 		ret = -ENOMEM;
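
For reference, the per-page bookkeeping being initialized above lives in struct binder_lru_page, with one entry in alloc->pages[] per page of the mmap'd binder region. A sketch of the structure, approximated from drivers/android/binder_alloc.h around this series (layout assumed, not verbatim):

/*
 * Sketch of the per-page entry in alloc->pages[] (approximated from
 * drivers/android/binder_alloc.h; field layout assumed, not verbatim).
 */
struct binder_lru_page {
	struct list_head lru;		/* entry on the global binder LRU */
	struct page *page_ptr;		/* backing page, NULL until installed */
	struct binder_alloc *alloc;	/* back-pointer used by the shrinker */
};

With this change, the lru list head and the alloc back-pointer are set exactly once per entry at mmap time rather than being re-initialized on every pass through the page-install path; only page_ptr still changes at runtime as pages are installed and reclaimed.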