/* linux/drivers/acpi/osl.c */

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <[email protected]>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <[email protected]>
 */

/* Prefix every pr_*() message from this file (value elided in this extract;
 * restored to match mainline osl.c). */
#define pr_fmt(fmt) "ACPI: OSL: " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>
#include <linux/security.h>

#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "acpica/accommon.h"
#include "internal.h"

/* Definitions for ACPI_DEBUG_PRINT() */
/* Component ID for ACPI_DEBUG_PRINT(); the bare "#define _COMPONENT" left by
 * the extraction expands to nothing — restore the ACPICA component value. */
#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/*ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
				      u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq =;
bool acpi_permanent_mmap =;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
#define acpi_ioremap_lock_held()

static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{}

static int __init acpi_reserve_resources(void)
{}
fs_initcall_sync(acpi_reserve_resources);

/* printf()-style output entry point used by ACPICA (body elided in this
 * extract).  EXPORT_SYMBOL() had lost its argument — it must name the
 * function it follows. */
void acpi_os_printf(const char *fmt, ...)
{}
EXPORT_SYMBOL(acpi_os_printf);

void __printf(1, 0) acpi_os_vprintf(const char *fmt, va_list args)
{}

#ifdef CONFIG_KEXEC
/* Physical RSDP address passed on the command line for kexec'ed kernels;
 * registered below via early_param() (argument was elided in this extract). */
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{}

/* Look up an existing ACPI iomem mapping for [phys, phys+size) — body elided
 * in this extract.  Restore the mandatory EXPORT_SYMBOL_GPL() argument. */
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/* ioremap will take care of cache attributes — never use kmap here.
 * (The first branch had lost its (pfn) parameter, making the two
 * definitions inconsistent; both values restored from mainline.) */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings.  If found, get a reference to it and return a pointer to it (its
 * virtual address).  If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
/* See the kernel-doc comment above; body elided in this extract.
 * EXPORT_SYMBOL_GPL() must name the function it follows. */
void __iomem __ref
*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

/* Plain-pointer wrapper around acpi_os_map_iomem() (body elided in this
 * extract); restore the missing EXPORT_SYMBOL_GPL() argument. */
void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_map_remove(struct work_struct *work)
{}

/* Must be called with mutex_lock(&acpi_ioremap_lock) */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and if there are no more active references
 * to it, queue it up for later removal.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done.  Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
/* See the kernel-doc comment above; body elided in this extract.
 * Restore the missing EXPORT_SYMBOL_GPL() argument. */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

/**
 * acpi_os_unmap_memory - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 */
/* See the kernel-doc comment above; body elided in this extract.
 * Restore the missing EXPORT_SYMBOL_GPL() argument. */
void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

/* Map the system-memory region described by a GAS structure (body elided in
 * this extract); restore the missing EXPORT_SYMBOL() argument. */
void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *gas)
{}
EXPORT_SYMBOL(acpi_os_map_generic_address);

/* Counterpart of acpi_os_map_generic_address() (body elided in this
 * extract); restore the missing EXPORT_SYMBOL() argument. */
void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
/*
 * Thin wrapper around virt_to_phys() for ACPICA: store the physical address
 * of @virt into *@phys.  Rejects NULL arguments with AE_BAD_PARAMETER.
 */
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
{
	if (virt == NULL || phys == NULL)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);
	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{}
/* Restore the __setup() arguments elided by the extraction. */
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
/* Without the config option the override is compile-time false; the bare
 * "#define acpi_rev_override" left by the extraction would expand to
 * nothing and break any expression using it. */
#define acpi_rev_override	false
#endif

/* Size of the acpi_os_name= override buffer.  The empty #define left by the
 * extraction made "acpi_os_name[ACPI_MAX_OVERRIDE_LEN]" an invalid
 * zero-token array size; value restored from mainline. */
#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{}

void acpi_os_stall(u32 us)
{}

/*
 * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running,
 * monotonically increasing timer with 100ns granularity. Do not use
 * ktime_get() to implement this function because this function may get
 * called after timekeeping has been suspended. Note: calling this function
 * after timekeeping has been suspended may lead to unexpected results
 * because when timekeeping is suspended the jiffies counter is not
 * incremented. See also timekeeping_suspend().
 */
u64 acpi_os_get_timer(void)
{}

/* I/O-port read hook for ACPICA (body elided in this extract); restore the
 * missing EXPORT_SYMBOL() argument. */
acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{}

EXPORT_SYMBOL(acpi_os_read_port);

/* I/O-port write hook for ACPICA (body elided in this extract); restore the
 * missing EXPORT_SYMBOL() argument. */
acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{}

EXPORT_SYMBOL(acpi_os_write_port);

int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{}

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{}

#ifdef CONFIG_PCI
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
			       u64 *value, u32 width)
{}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
				u64 value, u32 width)
{}
#endif

static void acpi_os_execute_deferred(struct work_struct *work)
{}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

/* Register an AML debugger backend (body elided in this extract); restore
 * the missing EXPORT_SYMBOL() argument. */
int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{}
EXPORT_SYMBOL(acpi_register_debugger);

/* Unregister an AML debugger backend (body elided in this extract); restore
 * the missing EXPORT_SYMBOL() argument. */
void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{}

ssize_t acpi_debugger_write_log(const char *msg)
{}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{}

int acpi_debugger_wait_command_ready(void)
{}

int acpi_debugger_notify_command_complete(void)
{}

int __init acpi_debugger_init(void)
{}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

/* See the block comment above for the contract; body elided in this
 * extract.  Restore the missing EXPORT_SYMBOL() argument. */
acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{}
EXPORT_SYMBOL(acpi_os_execute);

/* Wait for queued ACPI work to finish (body elided in this extract);
 * restore the missing EXPORT_SYMBOL() argument. */
void acpi_os_wait_events_complete(void)
{}
EXPORT_SYMBOL(acpi_os_wait_events_complete);

struct acpi_hp_work {};

static void acpi_hotplug_work_fn(struct work_struct *work)
{}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{}

bool acpi_queue_hotplug_work(struct work_struct *work)
{}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle *handle)
{}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.	Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{}

/* Debugger input-line hook (body elided in this extract); restore the
 * missing EXPORT_SYMBOL() argument. */
acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{}

acpi_status acpi_os_notify_command_complete(void)
{}

acpi_status acpi_os_signal(u32 function, void *info)
{}

static int __init acpi_os_name_setup(char *str)
{}

__setup();

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{}

__setup();

/* Check of resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
/* Values match the documentation in the comment above: strict=2 (default),
 * lax=1, no=0.  The empty #defines and the "=;" initializer were corrupted
 * by the extraction. */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{}

__setup();

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
/* Check a driver resource against ACPI OperationRegions (body elided in
 * this extract); restore the missing EXPORT_SYMBOL() argument. */
int acpi_check_resource_conflict(const struct resource *res)
{}
EXPORT_SYMBOL(acpi_check_resource_conflict);

/* I/O-region convenience wrapper for the conflict check above (body elided
 * in this extract); restore the missing EXPORT_SYMBOL() argument. */
int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{}
EXPORT_SYMBOL(acpi_check_region);

/*
 * Let drivers know whether the resource checks are effective
 */
/* Report whether resource checks are in strict mode (body elided in this
 * extract); restore the missing EXPORT_SYMBOL() argument. */
int acpi_resources_are_enforced(void)
{}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
	__acquires(lockp)
{}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags not_used)
	__releases(lockp)
{}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t **cache)
{}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t *cache)
{}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t *cache)
{}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t *cache, void *object)
{}
#endif

static int __init acpi_no_static_ssdt_setup(char *s)
{}

early_param();

static int __init acpi_disable_return_repair(char *s)
{}

__setup();

acpi_status __init acpi_os_initialize(void)
{}

acpi_status __init acpi_os_initialize1(void)
{}

acpi_status acpi_os_terminate(void)
{}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{}

#if (ACPI_REDUCED_HARDWARE)
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
				  u32 val_b)
{}
#else
/*
 * !ACPI_REDUCED_HARDWARE variant: no extended-sleep preparation hook is
 * consulted, so unconditionally report success to ACPICA.
 */
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
				  u32 val_b)
{
	return AE_OK;
}
#endif

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
			       u32 val_a, u32 val_b))
{}

acpi_status acpi_os_enter_sleep(u8 sleep_state,
				u32 reg_a_value, u32 reg_b_value)
{}