// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  ec.c - ACPI Embedded Controller Driver (v3)
 *
 *  Copyright (C) 2001-2015 Intel Corporation
 *    Author: 2014, 2015 Lv Zheng <[email protected]>
 *            2006, 2007 Alexey Starikovskiy <[email protected]>
 *            2006       Denis Sadykov <[email protected]>
 *            2004       Luming Yu <[email protected]>
 *            2001, 2002 Andy Grover <[email protected]>
 *            2001, 2002 Paul Diefenbaugh <[email protected]>
 *  Copyright (C) 2008      Alexey Starikovskiy <[email protected]>
 */

/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt)

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/io.h>

#include "internal.h"

#define ACPI_EC_CLASS
#define ACPI_EC_DEVICE_NAME

/* EC status register */
#define ACPI_EC_FLAG_OBF
#define ACPI_EC_FLAG_IBF
#define ACPI_EC_FLAG_CMD
#define ACPI_EC_FLAG_BURST
#define ACPI_EC_FLAG_SCI
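
/*
 * For reference, the ACPI specification defines the EC_SC status bits as
 * OBF (bit 0), IBF (bit 1), CMD (bit 3), BURST (bit 4) and SCI_EVT (bit 5).
 */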

/*
 * The SCI_EVT clearing timing is not defined by the ACPI specification.
 * This leads to lots of practical timing issues for the host EC driver.
 * The following variations are defined (from the target EC firmware's
 * perspective):
 * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
 *         target can clear SCI_EVT at any time so long as the host can see
 *         the indication by reading the status register (EC_SC). So the
 *         host should re-check SCI_EVT after the first time the SCI_EVT
 *         indication is seen, which is the same time the query request
 *         (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
 *         at any later time could indicate another event. Normally such
 *         EC firmware implements an event queue and will return 0x00 to
 *         indicate "no outstanding event".
 * QUERY: After seeing the query request (QR_EC) written to the command
 *        register (EC_CMD) by the host and having prepared the responding
 *        event value in the data register (EC_DATA), the target can safely
 *        clear SCI_EVT because the target can confirm that the current
 *        event is being handled by the host. The host then should check
 *        SCI_EVT right after reading the event response from the data
 *        register (EC_DATA).
 * EVENT: After seeing the event response read from the data register
 *        (EC_DATA) by the host, the target can clear SCI_EVT. As the
 *        target requires time to notice the change in the data register
 *        (EC_DATA), the host may be required to wait additional guarding
 *        time before checking the SCI_EVT again. Such guarding may not be
 *        necessary if the host is notified via another IRQ.
 */
#define ACPI_EC_EVT_TIMING_STATUS
#define ACPI_EC_EVT_TIMING_QUERY
#define ACPI_EC_EVT_TIMING_EVENT
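
/*
 * Illustrative sketch of where the SCI_EVT re-check sits for the three
 * timing models described above.  This is not the driver's actual state
 * machine; ec_check_sci_evt() is a hypothetical helper, and 0x84 is the
 * QR_EC opcode defined by the ACPI specification.
 */
#if 0
static void ec_query_event_sketch(struct acpi_ec *ec, unsigned int timing)
{
	u8 value;

	acpi_ec_write_cmd(ec, 0x84);		/* QR_EC */
	if (timing == ACPI_EC_EVT_TIMING_STATUS)
		ec_check_sci_evt(ec);	/* re-check right after issuing QR_EC */

	value = acpi_ec_read_data(ec);		/* event number, 0x00 = none */
	if (timing == ACPI_EC_EVT_TIMING_QUERY)
		ec_check_sci_evt(ec);	/* re-check after reading the event */

	if (timing == ACPI_EC_EVT_TIMING_EVENT) {
		udelay(ACPI_EC_UDELAY_POLL);	/* guard time for the target */
		ec_check_sci_evt(ec);
	}

	if (value)
		;	/* a real implementation would dispatch _Qxx here */
}
#endif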

/* EC commands */
enum ec_command {};
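
/*
 * For reference, the ACPI specification defines the EC command opcodes as
 * RD_EC (0x80), WR_EC (0x81), BE_EC (0x82, burst enable), BD_EC (0x83,
 * burst disable) and QR_EC (0x84, query).
 */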

#define ACPI_EC_DELAY
#define ACPI_EC_UDELAY_GLK
#define ACPI_EC_UDELAY_POLL
#define ACPI_EC_CLEAR_MAX
#define ACPI_EC_MAX_QUERIES

enum {};

#define ACPI_EC_COMMAND_POLL
#define ACPI_EC_COMMAND_COMPLETE

/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly =;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC();
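
/*
 * For example, the delay can be overridden at boot with "acpi.ec_delay=<n>"
 * on the kernel command line, or at run time via
 * /sys/module/acpi/parameters/ec_delay (writable, mode 0644 above).
 */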

static unsigned int ec_max_queries __read_mostly =;
module_param(ec_max_queries, uint, 0644);
MODULE_PARM_DESC();

static bool ec_busy_polling __read_mostly;
module_param(ec_busy_polling, bool, 0644);
MODULE_PARM_DESC();

static unsigned int ec_polling_guard __read_mostly =;
module_param(ec_polling_guard, uint, 0644);
MODULE_PARM_DESC();

static unsigned int ec_event_clearing __read_mostly =;

/*
 * If the number of false interrupts per transaction exceeds this threshold,
 * the driver assumes that a GPE storm has occurred and disables the GPE for
 * normal transactions.
 */
static unsigned int ec_storm_threshold  __read_mostly =;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC();
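
/*
 * Illustrative sketch of the storm heuristic described above, not the
 * driver's actual code (cf. acpi_ec_spurious_interrupt() further down):
 * ec_storm_check_sketch() is a hypothetical helper that would be called once
 * per interrupt that did not advance the current transaction.
 */
#if 0
static void ec_storm_check_sketch(struct acpi_ec *ec, unsigned int *irq_count)
{
	if (++(*irq_count) > ec_storm_threshold)
		acpi_ec_mask_events(ec);	/* mask the GPE, poll instead */
}
#endif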

static bool ec_freeze_events __read_mostly;
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC();

static bool ec_no_wakeup __read_mostly;
module_param(ec_no_wakeup, bool, 0644);
MODULE_PARM_DESC();

struct acpi_ec_query_handler {};

struct transaction {};

struct acpi_ec_query {};

static int acpi_ec_submit_query(struct acpi_ec *ec);
static void advance_transaction(struct acpi_ec *ec, bool interrupt);
static void acpi_ec_event_handler(struct work_struct *work);

struct acpi_ec *first_ec;
EXPORT_SYMBOL();

static struct acpi_ec *boot_ec;
static bool boot_ec_is_ecdt;
static struct workqueue_struct *ec_wq;
static struct workqueue_struct *ec_query_wq;

static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
static int EC_FLAGS_TRUST_DSDT_GPE; /* Trust the DSDT GPE setting over the ECDT one */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */

/* --------------------------------------------------------------------------
 *                           Logging/Debugging
 * -------------------------------------------------------------------------- */

/*
 * Debug separators used by developers to mark the boundaries of the EC
 * handling stages.
 */
#ifdef DEBUG
#define EC_DBG_SEP
#define EC_DBG_DRV
#define EC_DBG_STM
#define EC_DBG_REQ
#define EC_DBG_EVT
#else
#define EC_DBG_SEP
#define EC_DBG_DRV
#define EC_DBG_STM
#define EC_DBG_REQ
#define EC_DBG_EVT
#endif

#define ec_log_raw(fmt, ...)
#define ec_dbg_raw(fmt, ...)
#define ec_log(filter, fmt, ...)
#define ec_dbg(filter, fmt, ...)

#define ec_log_drv(fmt, ...)
#define ec_dbg_drv(fmt, ...)
#define ec_dbg_stm(fmt, ...)
#define ec_dbg_req(fmt, ...)
#define ec_dbg_evt(fmt, ...)
#define ec_dbg_ref(ec, fmt, ...)

/* --------------------------------------------------------------------------
 *                           Device Flags
 * -------------------------------------------------------------------------- */

static bool acpi_ec_started(struct acpi_ec *ec)
{}

static bool acpi_ec_event_enabled(struct acpi_ec *ec)
{}

static bool acpi_ec_flushed(struct acpi_ec *ec)
{}

/* --------------------------------------------------------------------------
 *                           EC Registers
 * -------------------------------------------------------------------------- */

static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{}

static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{}

static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{}

static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{}

#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
static const char *acpi_ec_cmd_string(u8 cmd)
{}
#else
#define acpi_ec_cmd_string
#endif

/* --------------------------------------------------------------------------
 *                           GPE Registers
 * -------------------------------------------------------------------------- */

static inline bool acpi_ec_gpe_status_set(struct acpi_ec *ec)
{}

static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{}

static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
{}

/* --------------------------------------------------------------------------
 *                           Transaction Management
 * -------------------------------------------------------------------------- */

static void acpi_ec_submit_request(struct acpi_ec *ec)
{}

static void acpi_ec_complete_request(struct acpi_ec *ec)
{}

static void acpi_ec_mask_events(struct acpi_ec *ec)
{}

static void acpi_ec_unmask_events(struct acpi_ec *ec)
{}

/*
 * acpi_ec_submit_flushable_request() - Increase the reference count unless
 *                                      the flush operation is in progress
 * @ec: the EC device
 *
 * This function must be used before taking a new action that should hold
 * the reference count.  If this function returns false, then the action
 * must be discarded or it will prevent the flush operation from being
 * completed.
 */
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
{}
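
/*
 * Sketch of the expected call pattern for the helper above: take a
 * flushable reference before starting new work and drop it when done.
 * ec_do_some_work_sketch() is a hypothetical caller, not a driver function.
 */
#if 0
static void ec_do_some_work_sketch(struct acpi_ec *ec)
{
	if (!acpi_ec_submit_flushable_request(ec))
		return;		/* EC is stopping/flushing: discard the work */

	/* ... perform or queue the work while holding the reference ... */

	acpi_ec_complete_request(ec);	/* drop the reference */
}
#endif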

static void acpi_ec_submit_event(struct acpi_ec *ec)
{}

static void acpi_ec_complete_event(struct acpi_ec *ec)
{}

static void acpi_ec_close_event(struct acpi_ec *ec)
{}

static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
{}

static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
{}

/*
 * Process _Q events that might have accumulated in the EC.
 * Must be called with the EC mutex held.
 */
static void acpi_ec_clear(struct acpi_ec *ec)
{}
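
/*
 * Sketch of the clearing loop described above (the actual implementation
 * lives in acpi_ec_clear()).  ec_query_one_event_sketch() is a hypothetical
 * helper that issues QR_EC and returns the event number through @value.
 */
#if 0
static void acpi_ec_clear_sketch(struct acpi_ec *ec)
{
	u8 value;
	int i;

	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
		if (ec_query_one_event_sketch(ec, &value))
			break;		/* query failed, stop trying */
		if (!value)
			break;		/* 0x00: no outstanding event left */
	}
}
#endif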

static void acpi_ec_enable_event(struct acpi_ec *ec)
{}

#ifdef CONFIG_PM_SLEEP
static void __acpi_ec_flush_work(void)
{}

static void acpi_ec_disable_event(struct acpi_ec *ec)
{}

void acpi_ec_flush_work(void)
{}
#endif /* CONFIG_PM_SLEEP */

static bool acpi_ec_guard_event(struct acpi_ec *ec)
{}

static int ec_transaction_polled(struct acpi_ec *ec)
{}

static int ec_transaction_completed(struct acpi_ec *ec)
{}

static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{}

static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t)
{}

static void advance_transaction(struct acpi_ec *ec, bool interrupt)
{}

static void start_transaction(struct acpi_ec *ec)
{}

static int ec_guard(struct acpi_ec *ec)
{}

static int ec_poll(struct acpi_ec *ec)
{}

static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{}

static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{}

static int acpi_ec_burst_enable(struct acpi_ec *ec)
{}

static int acpi_ec_burst_disable(struct acpi_ec *ec)
{}

static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
{}

static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data)
{}

static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{}

static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data)
{}

int ec_read(u8 addr, u8 *val)
{}
EXPORT_SYMBOL();

int ec_write(u8 addr, u8 val)
{}
EXPORT_SYMBOL();

int ec_transaction(u8 command,
		   const u8 *wdata, unsigned wdata_len,
		   u8 *rdata, unsigned rdata_len)
{}
EXPORT_SYMBOL();

/* Get the handle to the EC device */
acpi_handle ec_get_handle(void)
{}
EXPORT_SYMBOL();

static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{}

static bool acpi_ec_stopped(struct acpi_ec *ec)
{}

static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{}

static void acpi_ec_enter_noirq(struct acpi_ec *ec)
{}

static void acpi_ec_leave_noirq(struct acpi_ec *ec)
{}

void acpi_ec_block_transactions(void)
{}

void acpi_ec_unblock_transactions(void)
{}

/* --------------------------------------------------------------------------
 *                             Event Management
 * -------------------------------------------------------------------------- */
static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{}

static void acpi_ec_query_handler_release(struct kref *kref)
{}

static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
{}

int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
			      acpi_handle handle, acpi_ec_query_func func,
			      void *data)
{}
EXPORT_SYMBOL_GPL();

static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
					  bool remove_all, u8 query_bit)
{}

void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{}
EXPORT_SYMBOL_GPL();

static void acpi_ec_event_processor(struct work_struct *work)
{}

static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval)
{}

static int acpi_ec_submit_query(struct acpi_ec *ec)
{}

static void acpi_ec_event_handler(struct work_struct *work)
{}

static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
{}

static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{}

static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
			       u32 gpe_number, void *data)
{}

static irqreturn_t acpi_ec_irq_handler(int irq, void *data)
{}

/* --------------------------------------------------------------------------
 *                           Address Space Management
 * -------------------------------------------------------------------------- */

static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, u64 *value64,
		      void *handler_context, void *region_context)
{}

/* --------------------------------------------------------------------------
 *                             Driver Interface
 * -------------------------------------------------------------------------- */

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context);

static void acpi_ec_free(struct acpi_ec *ec)
{}

static struct acpi_ec *acpi_ec_alloc(void)
{}

static acpi_status
acpi_ec_register_query_methods(acpi_handle handle, u32 level,
			       void *context, void **return_value)
{}

static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{}

static bool install_gpe_event_handler(struct acpi_ec *ec)
{}

static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
{}

/**
 * ec_install_handlers - Install service callbacks and register query methods.
 * @ec: Target EC.
 * @device: ACPI device object corresponding to @ec.
 * @call_reg: If _REG should be called to notify about OpRegion availability.
 *
 * Install a handler for the EC address space type unless it has been installed
 * already.  If @device is not NULL, also look for EC query methods in the
 * namespace and register them, and install an event (either GPE or GPIO IRQ)
 * handler for the EC, if possible.
 *
 * Return:
 * -ENODEV if the address space handler cannot be installed, which means
 *  "unable to handle transactions",
 * -EPROBE_DEFER if GPIO IRQ acquisition needs to be deferred,
 * or 0 (success) otherwise.
 */
static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
			       bool call_reg)
{}
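
/*
 * Sketch of how a caller is expected to handle the return values documented
 * above; acpi_ec_probe_sketch() is a hypothetical caller, not a driver
 * function.
 */
#if 0
static int acpi_ec_probe_sketch(struct acpi_ec *ec, struct acpi_device *device)
{
	int ret = ec_install_handlers(ec, device, true);

	if (ret == -EPROBE_DEFER)
		return ret;	/* GPIO IRQ not available yet: retry probing later */
	if (ret)
		return ret;	/* e.g. -ENODEV: EC transactions cannot be handled */

	/* The EC can handle transactions from this point on. */
	return 0;
}
#endif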

static void ec_remove_handlers(struct acpi_ec *ec)
{}

static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool call_reg)
{}

static int acpi_ec_add(struct acpi_device *device)
{}

static void acpi_ec_remove(struct acpi_device *device)
{}

void acpi_ec_register_opregions(struct acpi_device *adev)
{}

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{}

static const struct acpi_device_id ec_device_ids[] =;

/*
 * This function is not Windows-compatible as Windows never enumerates the
 * namespace EC before the main ACPI device enumeration process. It is
 * retained for historical reasons and will be deprecated in the future.
 */
void __init acpi_ec_dsdt_probe(void)
{}

/*
 * acpi_ec_ecdt_start - Finalize the boot ECDT EC initialization.
 *
 * First, look for an ACPI handle for the boot ECDT EC if acpi_ec_add() has not
 * found a matching object in the namespace.
 *
 * Next, in case the DSDT EC is not functioning, it is still necessary to
 * provide a functional ECDT EC to handle events, so add an extra device object
 * to represent it (see https://bugzilla.kernel.org/show_bug.cgi?id=115021).
 *
 * This is useful on platforms with valid ECDT and invalid DSDT EC settings,
 * like ASUS X550ZE (see https://bugzilla.kernel.org/show_bug.cgi?id=196847).
 */
static void __init acpi_ec_ecdt_start(void)
{}

/*
 * On some hardware (e.g. Samsung Series 5/9 notebooks) it is necessary to clear
 * events accumulated by the EC during sleep: if too many events accumulate,
 * these ECs stop reporting GPEs until they are manually polled.
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
 *
 * Ideally, the EC should also be instructed NOT to accumulate events during
 * sleep (which Windows seems to do somehow), but the interface to control this
 * behaviour is not known at this time.
 *
 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx;
 * however, it is very likely that other Samsung models are affected as well.
 *
 * On systems which don't accumulate _Q events during sleep, this extra check
 * should be harmless.
 */
static int ec_clear_on_resume(const struct dmi_system_id *id)
{}

/*
 * Some ECDTs contain wrong register addresses (e.g. MSI MS-171F).
 * https://bugzilla.kernel.org/show_bug.cgi?id=12461
 */
static int ec_correct_ecdt(const struct dmi_system_id *id)
{}

/*
 * Some ECDTs contain a wrong GPE setting but share the same port addresses
 * with the DSDT EC; in that case, don't duplicate the DSDT EC with an ECDT EC.
 * https://bugzilla.kernel.org/show_bug.cgi?id=209989
 */
static int ec_honor_dsdt_gpe(const struct dmi_system_id *id)
{}

static const struct dmi_system_id ec_dmi_table[] __initconst =;
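
/*
 * A DMI quirk entry conventionally ties one of the callbacks above to a
 * vendor/product match, roughly as sketched below.  The matched strings are
 * placeholders, not an actual entry from the driver.
 */
#if 0
static const struct dmi_system_id ec_dmi_table_sketch[] __initconst = {
	{
		/* ec_clear_on_resume() sets EC_FLAGS_CLEAR_ON_RESUME */
		.callback = ec_clear_on_resume,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "VENDOR-PLACEHOLDER"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PRODUCT-PLACEHOLDER"),
		},
	},
	{ }	/* terminating empty entry */
};
#endif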

void __init acpi_ec_ecdt_probe(void)
{}

#ifdef CONFIG_PM_SLEEP
static int acpi_ec_suspend(struct device *dev)
{}

static int acpi_ec_suspend_noirq(struct device *dev)
{}

static int acpi_ec_resume_noirq(struct device *dev)
{}

static int acpi_ec_resume(struct device *dev)
{}

void acpi_ec_mark_gpe_for_wake(void)
{}
EXPORT_SYMBOL_GPL();

void acpi_ec_set_gpe_wake_mask(u8 action)
{}

static bool acpi_ec_work_in_progress(struct acpi_ec *ec)
{}

bool acpi_ec_dispatch_gpe(void)
{}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops acpi_ec_pm =;

static int param_set_event_clearing(const char *val,
				    const struct kernel_param *kp)
{}

static int param_get_event_clearing(char *buffer,
				    const struct kernel_param *kp)
{}

module_param_call();
MODULE_PARM_DESC();

static struct acpi_driver acpi_ec_driver =;

static void acpi_ec_destroy_workqueues(void)
{}

static int acpi_ec_init_workqueues(void)
{}

static const struct dmi_system_id acpi_ec_no_wakeup[] =;

void __init acpi_ec_init(void)
{}

/* EC driver currently not unloadable */
#if 0
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
	acpi_ec_destroy_workqueues();
}
#endif	/* 0 */