#define pr_fmt(fmt) …
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"
struct list_lru binder_freelist;
static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum { … };
static uint32_t binder_alloc_debug_mask = …;
module_param_named(debug_mask, binder_alloc_debug_mask, uint, 0644);
#define binder_alloc_debug(mask, x...) …
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{ … }
static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{ … }
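/*
 * A buffer's size is the gap to the next buffer in the address-ordered
 * alloc->buffers list, or to the end of the mapping for the last buffer.
 */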
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{ … }
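/*
 * Insert @new_buffer into the size-ordered alloc->free_buffers rbtree used
 * for best-fit allocation.
 */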
static void binder_insert_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *new_buffer)
{ … }
static void binder_insert_allocated_buffer_locked(
struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{ … }
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
struct binder_alloc *alloc,
unsigned long user_ptr)
{ … }
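/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:    binder_alloc for this proc
 * @user_ptr: User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return: Pointer to buffer or NULL
 */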
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
unsigned long user_ptr)
{ … }
static inline void
binder_set_installed_page(struct binder_lru_page *lru_page,
struct page *page)
{ … }
static inline struct page *
binder_get_installed_page(struct binder_lru_page *lru_page)
{ … }
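/*
 * Park the pages backing [start, end) on binder_freelist so the shrinker may
 * reclaim them while the range is unused.
 */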
static void binder_lru_freelist_add(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{ … }
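/*
 * Allocate one page and map it at @addr in the task's binder vma, recording
 * the result in @lru_page.
 */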
static int binder_install_single_page(struct binder_alloc *alloc,
struct binder_lru_page *lru_page,
unsigned long addr)
{ … }
static int binder_install_buffer_pages(struct binder_alloc *alloc,
struct binder_buffer *buffer,
size_t size)
{ … }
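/*
 * Take the pages backing [start, end) off binder_freelist because the range
 * is about to be handed out again.
 */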
static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{ … }
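/*
 * Paired store/load accessors for alloc->vma; lockless readers depend on the
 * release/acquire ordering these provide.
 */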
static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{ … }
static inline struct vm_area_struct *binder_alloc_get_vma(
struct binder_alloc *alloc)
{ … }
static void debug_no_space_locked(struct binder_alloc *alloc)
{ … }
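/*
 * Heuristic behind oneway spam detection: returns true when this process is
 * consuming a suspiciously large share of its async buffer space.
 */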
static bool debug_low_async_space_locked(struct binder_alloc *alloc)
{ … }
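/*
 * Core allocator: best-fit search of the free_buffers rbtree, splitting the
 * chosen buffer when it is larger than @size and enforcing the reduced space
 * limit for async transactions. Called with the alloc lock held.
 */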
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
struct binder_buffer *new_buffer,
size_t size,
int is_async)
{ … }
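/*
 * Align each size to sizeof(void *) and sum them, returning 0 if the sum
 * overflows.
 */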
static inline size_t sanitized_size(size_t data_size,
size_t offsets_size,
size_t extra_buffers_size)
{ … }
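/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       size of the offsets area
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. The size allocated is the
 * sum of the three given sizes (each rounded up to a pointer-sized boundary).
 *
 * Return: The allocated buffer or an ERR_PTR() on failure
 */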
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async)
{ … }
static unsigned long buffer_start_page(struct binder_buffer *buffer)
{ … }
static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
{ … }
static void binder_delete_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{ … }
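/*
 * Return @buffer to the free tree, coalescing it with adjacent free buffers
 * and moving pages that no longer back any data onto the freelist.
 */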
static void binder_free_buf_locked(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{ … }
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
struct binder_buffer *buffer,
binder_size_t buffer_offset,
pgoff_t *pgoffp)
{ … }
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{ … }
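/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:  binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */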
void binder_alloc_free_buf(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{ … }
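/**
 * binder_alloc_mmap_handler() - map address space for proc
 * @alloc: binder_alloc for this proc
 * @vma:   vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in @vma for
 * allocating binder buffers.
 *
 * Return: 0 on success, -EBUSY if the space is already mapped, -ENOMEM on
 * allocation failure.
 */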
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{ … }
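/*
 * Tear down the allocator when the binder proc is destroyed: drop any buffers
 * that are still outstanding, then free the backing pages and the pages array.
 */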
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{ … }
void binder_alloc_print_allocated(struct seq_file *m,
struct binder_alloc *alloc)
{ … }
void binder_alloc_print_pages(struct seq_file *m,
struct binder_alloc *alloc)
{ … }
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{ … }
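/*
 * Called when the binder vma is closed: clear the cached vma pointer so no
 * new buffers can be allocated for this process.
 */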
void binder_alloc_vma_close(struct binder_alloc *alloc)
{ … }
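/*
 * list_lru walk callback run by the shrinker: unmap one reclaimable page from
 * the owning process (if it is still mapped) and free it.
 */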
enum lru_status binder_alloc_free_page(struct list_head *item,
struct list_lru_one *lru,
spinlock_t *lock,
void *cb_arg)
__must_hold(lock)
{ … }
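/*
 * Shrinker callbacks: count reports how many pages sit on binder_freelist,
 * scan walks that list with binder_alloc_free_page().
 */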
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{ … }
static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{ … }
static struct shrinker *binder_shrinker;
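/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for new binder
 * proc.
 */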
void binder_alloc_init(struct binder_alloc *alloc)
{ … }
int binder_alloc_shrinker_init(void)
{ … }
void binder_alloc_shrinker_exit(void)
{ … }
static inline bool check_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer,
binder_size_t offset, size_t bytes)
{ … }
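/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc:         binder_alloc for this proc
 * @buffer:        binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from:          userspace pointer to source buffer
 * @bytes:         bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */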
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer,
binder_size_t buffer_offset,
const void __user *from,
size_t bytes)
{ … }
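/*
 * Copy @bytes between kernel memory and @buffer one page at a time, in the
 * direction selected by @to_buffer; shared by the copy_to/copy_from wrappers
 * below.
 */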
static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
bool to_buffer,
struct binder_buffer *buffer,
binder_size_t buffer_offset,
void *ptr,
size_t bytes)
{ … }
int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer,
binder_size_t buffer_offset,
void *src,
size_t bytes)
{ … }
int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
void *dest,
struct binder_buffer *buffer,
binder_size_t buffer_offset,
size_t bytes)
{ … }