linux/drivers/android/binder.c

// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */

#define pr_fmt(fmt)

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE();

#define FORBIDDEN_MMAP_FLAGS

enum {};
static uint32_t binder_debug_mask =;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param =;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{}
module_param_call();

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{}

#define binder_txn_error(x...)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{}

#define binder_set_extended_error(ee, _id, _command, _param)

#define to_flat_binder_object(hdr)

#define to_binder_fd_object(hdr)

#define to_binder_buffer_object(hdr)

#define to_binder_fd_array_object(hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{}

static inline void binder_stats_created(enum binder_stat_types type)
{}

struct binder_transaction_log_entry {};

struct binder_transaction_log {};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{}

enum binder_deferred_state {};

enum {};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:                struct binder_proc whose outer lock is released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc whose inner lock is released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node whose lock is released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node whose locks are released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{}
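
/*
 * Illustrative sketch only, not part of the driver: the lock ranking
 * documented at the top of this file, expressed with the helpers above.
 * The function name is hypothetical and exists purely as an example.
 */
static inline void binder_lock_order_example(struct binder_proc *proc,
					     struct binder_node *node)
{
	binder_proc_lock(proc);		/* 1) proc->outer_lock */
	binder_node_lock(node);		/* 2) node->lock */
	binder_inner_proc_lock(proc);	/* 3) proc->inner_lock */
	/* ... refs, node fields and todo lists may be touched here ... */
	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);
	binder_proc_unlock(proc);
}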

static bool binder_worklist_empty_ilocked(struct list_head *list)
{}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			   struct list_head *target_list)
{}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{}
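
/*
 * Illustrative sketch only: the difference between the deferred and
 * non-deferred enqueue helpers above. binder_enqueue_thread_work()
 * marks the thread as having work to process so a reader will not go
 * back to sleep, while the deferred variant leaves that state alone.
 * All names below are hypothetical example code, not driver entry
 * points.
 */
static inline void binder_enqueue_example(struct binder_thread *thread,
					  struct binder_work *w_now,
					  struct binder_work *w_later)
{
	/* the reader must handle this before it sleeps again */
	binder_enqueue_thread_work(thread, w_now);

	binder_inner_proc_lock(thread->proc);
	/* may sit on thread->todo until something else wakes the reader */
	binder_enqueue_deferred_thread_work_ilocked(thread, w_later);
	binder_inner_proc_unlock(thread->proc);
}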

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{}
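
/*
 * Illustrative sketch only: the select-then-wake pattern described in
 * the comment above, performed under proc->inner_lock. The function
 * name is hypothetical.
 */
static inline void binder_select_and_wake_example(struct binder_proc *proc)
{
	struct binder_thread *thread;

	binder_inner_proc_lock(proc);
	/* the thread is taken off waiting_threads, so we must wake it */
	thread = binder_select_thread_ilocked(proc);
	/* a NULL thread means poll() waiters get woken instead */
	binder_wakeup_thread_ilocked(proc, thread, false /* sync */);
	binder_inner_proc_unlock(proc);
}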

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{}

static void binder_set_nice(long nice)
{}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{}

static void binder_free_node(struct binder_node *node)
{}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes).
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{}
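
/*
 * Illustrative sketch only: the temporary-reference pattern described
 * above, keeping a node alive while it is reachable through nothing
 * but a local pointer. The function name is hypothetical.
 */
static inline void binder_node_tmpref_example(struct binder_node *node)
{
	binder_inc_node_tmpref(node);
	/* ... node cannot be freed while the tmpref is held ... */
	binder_dec_node_tmpref(node);
}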

static void binder_put_node(struct binder_node *node)
{}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{}

/* Find the smallest unused descriptor the "slow way" */
static u32 slow_desc_lookup_olocked(struct binder_proc *proc)
{}

/*
 * Find an available reference descriptor ID. The proc->outer_lock might
 * be released in the process, in which case -EAGAIN is returned and the
 * @desc should be considered invalid.
 */
static int get_ref_desc_olocked(struct binder_proc *proc,
				struct binder_node *node,
				u32 *desc)
{}
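
/*
 * Illustrative sketch only (hypothetical caller): how the -EAGAIN
 * contract above could be handled. A return of -EAGAIN means the outer
 * lock was dropped and reacquired, so @desc (and anything else derived
 * under the lock) is stale and must be recomputed.
 */
static inline int binder_get_desc_example(struct binder_proc *proc,
					  struct binder_node *node,
					  u32 *desc)
{
	int ret;

	binder_proc_lock(proc);
	do {
		ret = get_ref_desc_olocked(proc, node, desc);
	} while (ret == -EAGAIN);
	binder_proc_unlock(proc);

	return ret;
}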

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node, or NULL if not found or if only a weak ref exists when a strong ref is required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{}

/* shrink descriptor bitmap if needed */
static void try_shrink_dmap(struct binder_proc *proc)
{}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{}


/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when a new
 * transaction is created or when the binder_proc is in use by threads
 * that are being released. When done with the binder_proc, this
 * function is called to decrement the counter and free the proc if
 * appropriate (proc has been released, all threads have been released
 * and it is not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_dec_thread_txn() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction whose fd fixups are to be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{}

static void binder_txn_latency_free(struct binder_transaction *t)
{}

static void binder_free_transaction(struct binder_transaction *t)
{}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{}

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start_offset and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start_offset were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @b at offset @fixup_offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{}

/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:                callback_head for task work
 * @fd:                   fd to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {};

/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual ksys_close() on the
 * given file descriptor.
 */
static void binder_do_fd_close(struct callback_head *twork)
{}

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{}
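
/*
 * Minimal sketch, not the driver's implementation: how a close can be
 * deferred via task_work as described above. Assumes struct
 * binder_task_work_cb carries the @twork and @fd members documented
 * earlier; error handling is elided.
 */
static inline void binder_deferred_fd_close_example(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	twcb->fd = fd;
	init_task_work(&twcb->twork, binder_do_fd_close);
	/* runs binder_do_fd_close() once the task returns to user space */
	task_work_add(current, &twcb->twork, TWA_RESUME);
}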

static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t off_end_offset,
					      bool is_failure)
{}

/* Clean up all the objects in the buffer */
static inline void binder_release_entire_buffer(struct binder_proc *proc,
						struct binder_thread *thread,
						struct binder_buffer *buffer,
						bool is_failure)
{}

static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{}

static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{}

/**
 * struct binder_ptr_fixup - data to be fixed-up in target buffer
 * @offset:	offset in target buffer to fixup
 * @skip_size:	bytes to skip in copy (fixup will be written later)
 * @fixup_data:	data to write at fixup offset
 * @node:	list node
 *
 * This is used for the pointer fixup list (pf) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_ptr_fixup {};

/**
 * struct binder_sg_copy - scatter-gather data to be copied
 * @offset:		offset in target buffer
 * @sender_uaddr:	user address in source buffer
 * @length:		bytes to copy
 * @node:		list node
 *
 * This is used for the sg copy list (sgc) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_sg_copy {};

/**
 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
 * @alloc:	binder_alloc associated with @buffer
 * @buffer:	binder buffer in target process
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Processes all elements of @sgc_head, applying fixups from @pf_head
 * and copying the scatter-gather data from the source process' user
 * buffer to the target's buffer. It is expected that the list creation
 * and processing all occurs during binder_transaction() so these lists
 * are only accessed in local context.
 *
 * Return: 0=success, else -errno
 */
static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 struct list_head *sgc_head,
					 struct list_head *pf_head)
{}

/**
 * binder_cleanup_deferred_txn_lists() - free specified lists
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Called to clean up @sgc_head and @pf_head if there is an
 * error.
 */
static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
					      struct list_head *pf_head)
{}

/**
 * binder_defer_copy() - queue a scatter-gather buffer for copy
 * @sgc_head:		list_head of scatter-gather copy list
 * @offset:		binder buffer offset in target process
 * @sender_uaddr:	user address in source process
 * @length:		bytes to copy
 *
 * Specify a scatter-gather block to be copied. The actual copy must
 * be deferred until all the needed fixups are identified and queued.
 * Then the copy and fixups are done together so un-translated values
 * from the source are never visible in the target buffer.
 *
 * We are guaranteed that repeated calls to this function will have
 * monotonically increasing @offset values so the list will naturally
 * be ordered.
 *
 * Return: 0=success, else -errno
 */
static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
			     const void __user *sender_uaddr, size_t length)
{}

/**
 * binder_add_fixup() - queue a fixup to be applied to sg copy
 * @pf_head:	list_head of binder ptr fixup list
 * @offset:	binder buffer offset in target process
 * @fixup:	bytes to be copied for fixup
 * @skip_size:	bytes to skip when copying (fixup will be applied later)
 *
 * Add the specified fixup to a list ordered by @offset. When copying
 * the scatter-gather buffers, the fixup will be copied instead of
 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
 * will be applied later (in target process context), so we just skip
 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
 * value in @fixup.
 *
 * This function is called *mostly* in @offset order, but there are
 * exceptions. Since out-of-order inserts are relatively uncommon,
 * we insert the new element by searching backward from the tail of
 * the list.
 *
 * Return: 0=success, else -errno
 */
static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
			    binder_uintptr_t fixup, size_t skip_size)
{}
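
/*
 * Illustrative sketch only: inserting into an @offset-ordered list by
 * scanning backward from the tail, as the comment above describes.
 * Assumes struct binder_ptr_fixup has the @offset and @node members
 * documented earlier in this file; the function name is hypothetical.
 */
static inline void binder_ordered_insert_example(struct list_head *pf_head,
						 struct binder_ptr_fixup *new_pf)
{
	struct binder_ptr_fixup *pf;

	list_for_each_entry_reverse(pf, pf_head, node) {
		if (pf->offset < new_pf->offset) {
			/* first smaller offset found: insert right after it */
			list_add(&new_pf->node, &pf->node);
			return;
		}
	}
	/* every existing entry is larger (or list is empty): insert at head */
	list_add(&new_pf->node, pf_head);
}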

static int binder_translate_fd_array(struct list_head *pf_head,
				     struct binder_fd_array_object *fda,
				     const void __user *sender_ubuffer,
				     struct binder_buffer_object *parent,
				     struct binder_buffer_object *sender_uparent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{}

static int binder_fixup_parent(struct list_head *pf_head,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t off_start_offset,
			       binder_size_t num_valid,
			       binder_size_t last_fixup_obj_off,
			       binder_size_t last_fixup_min_off)
{}

/**
 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
 * @t1: the pending async txn in the frozen process
 * @t2: the new async txn to supersede the outdated pending one
 *
 * Return:  true if t2 can supersede t1
 *          false if t2 cannot supersede t1
 */
static bool binder_can_update_transaction(struct binder_transaction *t1,
					  struct binder_transaction *t2)
{}

/**
 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
 * @t:		 new async transaction
 * @target_list: list to find outdated transaction
 *
 * Return: the outdated transaction if found
 *         NULL if no outdated transaction can be found
 *
 * Requires the proc->inner_lock to be held.
 */
static struct binder_transaction *
binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
					 struct list_head *target_list)
{}

/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	0 if the transaction was successfully queued
 *		BR_DEAD_REPLY if the target process or thread is dead
 *		BR_FROZEN_REPLY if the target process or thread is frozen and
 *			the sync transaction was rejected
 *		BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
 *		and the async transaction was successfully queued
 */
static int binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{}

/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:         struct binder_node for which to get refs
 * @procp:        returns @node->proc if valid
 * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{}

static void binder_set_txn_from_error(struct binder_transaction *t, int id,
				      uint32_t command, int32_t param)
{}

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{}

/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns buffer
 * @thread:	binder thread performing the buffer release
 * @buffer:	buffer to be freed
 * @is_failure:	failed to send transaction
 *
 * If the buffer is for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup buffer and free it.
 */
static void
binder_free_buf(struct binder_proc *proc,
		struct binder_thread *thread,
		struct binder_buffer *buffer, bool is_failure)
{}

static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{}

static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{}

static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{}

static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{}

/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:         binder_proc associated with @t->buffer
 * @t:	binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate, fix up the buffer with the
 * new fds first, and only then install the files.
 *
 * If we fail to allocate an fd, skip the install and release
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{}
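
/*
 * Illustrative sketch only of the two-phase approach described above:
 * reserve every fd before installing any file, so a failed reservation
 * can be unwound without yanking fds that user space has already seen.
 * The struct and field names below are assumptions for the example and
 * are not the driver's own types.
 */
struct binder_fd_fixup_example {
	struct list_head entry;
	struct file *file;
	int fd;
};

static inline int binder_apply_fd_fixups_example(struct list_head *fixups)
{
	struct binder_fd_fixup_example *f;
	int ret;

	/* phase 1: reserve fds; nothing is visible to user space yet */
	list_for_each_entry(f, fixups, entry) {
		ret = get_unused_fd_flags(O_CLOEXEC);
		if (ret < 0) {
			/* unwind the fds reserved so far */
			list_for_each_entry_continue_reverse(f, fixups, entry)
				put_unused_fd(f->fd);
			return ret;
		}
		f->fd = ret;
	}

	/* phase 2: every reservation succeeded, publish the files */
	list_for_each_entry(f, fixups, entry)
		fd_install(f->fd, f->file);

	return 0;
}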

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{}

static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{}

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{}

static void binder_free_proc(struct binder_proc *proc)
{}

static void binder_free_thread(struct binder_thread *thread)
{}

static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{}

static __poll_t binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{}

static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
				struct binder_thread *thread)
{}

static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{}

static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{}

static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{}

static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{}

static int binder_ioctl_freeze(struct binder_freeze_info *info,
			       struct binder_proc *target_proc)
{}

static int binder_ioctl_get_freezer_info(
				struct binder_frozen_status_info *info)
{}

static int binder_ioctl_get_extended_error(struct binder_thread *thread,
					   void __user *ubuf)
{}

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{}

static void binder_vma_open(struct vm_area_struct *vma)
{}

static void binder_vma_close(struct vm_area_struct *vma)
{}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{}

static const struct vm_operations_struct binder_vm_ops =;

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{}

static int binder_open(struct inode *nodp, struct file *filp)
{}

static int binder_flush(struct file *filp, fl_owner_t id)
{}

static void binder_deferred_flush(struct binder_proc *proc)
{}

static int binder_release(struct inode *nodp, struct file *filp)
{}

static int binder_node_release(struct binder_node *node, int refs)
{}

static void binder_deferred_release(struct binder_proc *proc)
{}

static void binder_deferred_func(struct work_struct *work)
{}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{}

static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{}

static void print_binder_work_ilocked(struct seq_file *m,
				     struct binder_proc *proc,
				     const char *prefix,
				     const char *transaction_prefix,
				     struct binder_work *w)
{}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{}

static const char * const binder_return_strings[] =;

static const char * const binder_command_strings[] =;

static const char * const binder_objstat_strings[] =;

static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{}

static int state_show(struct seq_file *m, void *unused)
{}

static int stats_show(struct seq_file *m, void *unused)
{}

static int transactions_show(struct seq_file *m, void *unused)
{}

static int proc_show(struct seq_file *m, void *unused)
{}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{}

static int transaction_log_show(struct seq_file *m, void *unused)
{}

const struct file_operations binder_fops =;

DEFINE_SHOW_ATTRIBUTE();
DEFINE_SHOW_ATTRIBUTE();
DEFINE_SHOW_ATTRIBUTE();
DEFINE_SHOW_ATTRIBUTE();

const struct binder_debugfs_entry binder_debugfs_entries[] =;

static int __init init_binder_device(const char *name)
{}

static int __init binder_init(void)
{}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE();