/* linux/drivers/acpi/cppc_acpi.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <[email protected]>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale as against a discretized
 * P-state scale which is tied to CPU frequency only. In brief, the basic
 * operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as BMC) is free to optimize request within requested bounds
 *   depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS
 *
 * The communication between OS and platform occurs through another medium
 * called (PCC) Platform Communication Channel. This is a generic mailbox like
 * mechanism which includes doorbell semantics to indicate register updates.
 * See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
 * above specifications.
 */

/*
 * Prefix for pr_*() log messages from this file.
 * NOTE(review): the expansion is empty in this view — looks elided;
 * confirm against the original source (upstream prefixes "ACPI CPPC: ").
 */
#define pr_fmt(fmt)

#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/topology.h>
#include <linux/dmi.h>
#include <linux/units.h>
#include <linux/unaligned.h>

#include <acpi/cppc_acpi.h>

/*
 * Per-PCC-subspace bookkeeping (channel handle, locks, timing info, ...).
 * NOTE(review): members appear elided in this view — confirm against the
 * original source.
 */
struct cppc_pcc_data {};

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/*
 * NOTE(review): every macro expansion below is empty in this view — they
 * look elided by the tool that produced this listing. Do not take the
 * empty bodies as authoritative; confirm against the original source
 * before editing. The comments above each macro describe its intent.
 */

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id)

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc)

/* Check if a CPC register is in FFH */
#define CPC_IN_FFH(cpc)

/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc)

/* Check if a CPC register is in SystemIo */
#define CPC_IN_SYSTEM_IO(cpc)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc)
/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES

#define OVER_16BTS_MASK

/* sysfs helpers: read-only attribute and show-function generators */
#define define_one_cppc_ro(_name)

#define to_cpc_desc(a)

#define show_cppc_data(access_fn, struct_name, member_name)

/*
 * NOTE(review): the show_cppc_data() instantiations below have lost their
 * argument lists in this view — confirm which CPPC fields they expose.
 */
show_cppc_data();
show_cppc_data();
show_cppc_data();
show_cppc_data();
show_cppc_data();
show_cppc_data();
show_cppc_data();

show_cppc_data();
show_cppc_data();

/* Check for valid access_width, otherwise, fallback to using bit_width */
#define GET_BIT_WIDTH(reg)

/*
 * Shift and apply the mask for CPC reads/writes.
 * NOTE(review): MASK_VAL_WRITE ends with a line-continuation backslash
 * followed by a blank line — harmless as shown (empty continuation), but
 * likely an artifact of the elided expansion; confirm.
 */
#define MASK_VAL_READ(reg, val)
#define MASK_VAL_WRITE(reg, prev_val, val)	\

/*
 * sysfs show callback for the feedback counters attribute.
 * NOTE(review): body elided in this view.
 */
static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{}
/* NOTE(review): argument list elided — presumably feedback_ctrs. */
define_one_cppc_ro();

/* sysfs attribute list, group and ktype for the per-CPU acpi_cppc kobject.
 * NOTE(review): initializers elided in this view (`=;` is not valid C). */
static struct attribute *cppc_attrs[] =;
ATTRIBUTE_GROUPS();

static const struct kobj_type cppc_ktype =;

/*
 * Poll the PCC channel for subspace @pcc_ss_id until the platform signals
 * command completion (and optionally check the error bit).
 * NOTE(review): body elided in this view; semantics inferred from the name
 * and callers — confirm against the original source.
 */
static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{}

/*
 * This function transfers the ownership of the PCC to the platform
 * So it must be called while holding write_lock(pcc_lock)
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{}

/* Mailbox tx_done callback for the CPPC PCC client. Body elided in view. */
static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{}

/* Mailbox client used to register the PCC channel.
 * NOTE(review): initializer elided in this view (`=;` is not valid C). */
static struct mbox_client cppc_mbox_cl =;

/*
 * Evaluate the _PSD (P-State Dependency) object for @handle and store the
 * domain info in @cpc_ptr. NOTE(review): body elided in this view.
 */
static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{}

/*
 * acpi_cpc_valid() - report whether every possible CPU has a parsed _CPC.
 * NOTE(review): body elided in this view — confirm exact semantics.
 */
bool acpi_cpc_valid(void)
{}
EXPORT_SYMBOL_GPL();

/*
 * cppc_allow_fast_switch() - report whether fast (interrupt-context) perf
 * switching is possible, i.e. the desired-perf register is not behind PCC.
 * NOTE(review): body elided in this view — confirm exact semantics.
 */
bool cppc_allow_fast_switch(void)
{}
EXPORT_SYMBOL_GPL();

/**
 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
 * @cpu: Find all CPUs that share a domain with cpu.
 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
 *
 *	Return: 0 for success or negative value for err.
 */
/* NOTE(review): body elided in this view. */
int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{}
EXPORT_SYMBOL_GPL();

/*
 * Request the PCC mailbox channel for subspace @pcc_ss_idx and map its
 * shared memory region. NOTE(review): body elided in this view.
 */
static int register_pcc_channel(int pcc_ss_idx)
{}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * This is the __weak default used when the architecture does not provide
 * its own implementation. The original body was empty, so callers reading
 * the result hit an indeterminate value (undefined behavior in C); the
 * default must explicitly report "not supported".
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/**
 * cpc_supported_by_cpu() - check if CPPC is supported by CPU
 *
 * Check if the architectural support for CPPC is present even
 * if the _OSC hasn't prescribed it
 *
 * This is the __weak default used when the architecture does not provide
 * its own implementation. The original body was empty, so callers reading
 * the result hit an indeterminate value (undefined behavior in C); the
 * default must explicitly report "not supported".
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_supported_by_cpu(void)
{
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package.
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
/* NOTE(review): body elided in this view. */
static int pcc_data_alloc(int pcc_ss_id)
{}

/*
 * An example CPC table looks like the following.
 *
 *  Name (_CPC, Package() {
 *      17,							// NumEntries
 *      1,							// Revision
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *      ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
 *      ...
 *      ...
 *      ...
 *  }
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *  Register (
 *      PCC,	// AddressSpaceKeyword
 *      8,	// RegisterBitWidth
 *      8,	// RegisterBitOffset
 *      0x30,	// RegisterAddress
 *      9,	// AccessSize (subspace ID)
 *  )
 */

/*
 * Default no-op fallback for architectures that do not define their own
 * arch_init_invariance_cppc() hook (called from the probe path).
 */
#ifndef arch_init_invariance_cppc
static inline void arch_init_invariance_cppc(void) { }
#endif

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 *	Return: 0 for success or negative value for err.
 */
/* NOTE(review): body elided in this view. */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{}
EXPORT_SYMBOL_GPL();

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
/* NOTE(review): body elided in this view. */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{}
EXPORT_SYMBOL_GPL();

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * This is the __weak default used when the architecture provides no FFH
 * implementation. The original body was empty, so callers reading the
 * result hit an indeterminate value (undefined behavior in C); the default
 * must report an error. -EOPNOTSUPP is used per checkpatch guidance
 * (preferred over the kernel-internal ENOTSUPP).
 *
 * Return: 0 for success and error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -EOPNOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * This is the __weak default used when the architecture provides no FFH
 * implementation. The original body was empty, so callers reading the
 * result hit an indeterminate value (undefined behavior in C); the default
 * must report an error. -EOPNOTSUPP is used per checkpatch guidance
 * (preferred over the kernel-internal ENOTSUPP).
 *
 * Return: 0 for success and error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -EOPNOTSUPP;
}

/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, it should be
 * as fast as possible. We have already mapped the PCC subspace during init, so
 * we can directly write to it.
 */

/* Read register @reg_res for @cpu into *@val. Body elided in this view. */
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{}

/* Write @val to register @reg_res for @cpu. Body elided in this view. */
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{}

/*
 * Common helper backing the single-register cppc_get_*_perf() accessors:
 * read CPC register @reg_idx of @cpunum into *@perf.
 * NOTE(review): body elided in this view.
 */
static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{}

/**
 * cppc_get_desired_perf - Get the desired performance register value.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
/* NOTE(review): body elided in this view. */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{}
EXPORT_SYMBOL_GPL();

/**
 * cppc_get_nominal_perf - Get the nominal performance register value.
 * @cpunum: CPU from which to get nominal performance.
 * @nominal_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
/* NOTE(review): body elided in this view. Also, unlike the sibling getters,
 * no EXPORT_SYMBOL_GPL() follows this definition — confirm whether the
 * export was intentionally omitted or lost in the eliding. */
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{}

/**
 * cppc_get_highest_perf - Get the highest performance register value.
 * @cpunum: CPU from which to get highest performance.
 * @highest_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
/* NOTE(review): body elided in this view. */
int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{}
EXPORT_SYMBOL_GPL();

/**
 * cppc_get_epp_perf - Get the epp register value.
 * @cpunum: CPU from which to get epp preference value.
 * @epp_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
/* NOTE(review): body elided in this view. */
int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{}
EXPORT_SYMBOL_GPL();

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
/* NOTE(review): body elided in this view. */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{}
EXPORT_SYMBOL_GPL();

/**
 * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
 *
 * CPPC has flexibility about how CPU performance counters are accessed.
 * One of the choices is PCC regions, which can have a high access latency. This
 * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
 *
 * Return: true if any of the counters are in PCC regions, false otherwise
 */
/* NOTE(review): body elided in this view. */
bool cppc_perf_ctrs_in_pcc(void)
{}
EXPORT_SYMBOL_GPL();

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
/* NOTE(review): body elided in this view. */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{}
EXPORT_SYMBOL_GPL();

/*
 * Set Energy Performance Preference Register value through
 * Performance Controls Interface
 *
 * NOTE(review): body elided in this view; return convention presumably
 * matches the sibling setters (0 for success, -ERRNO otherwise) — confirm.
 */
int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
{}
EXPORT_SYMBOL_GPL();

/**
 * cppc_get_auto_sel_caps - Read autonomous selection register.
 * @cpunum : CPU from which to read register.
 * @perf_caps : struct where autonomous selection register value is updated.
 */
/* NOTE(review): body elided in this view. */
int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{}
EXPORT_SYMBOL_GPL();

/**
 * cppc_set_auto_sel - Write autonomous selection register.
 * @cpu    : CPU to which to write register.
 * @enable : the desired value of autonomous selection register to be updated.
 */
/* NOTE(review): body elided in this view. */
int cppc_set_auto_sel(int cpu, bool enable)
{}
EXPORT_SYMBOL_GPL();

/**
 * cppc_set_enable - Set to enable CPPC on the processor by writing the
 * Continuous Performance Control package EnableRegister field.
 * @cpu: CPU for which to enable CPPC register.
 * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
 *
 * Return: 0 for success, -ERRNO or -EIO otherwise.
 */
/* NOTE(review): body elided in this view. */
int cppc_set_enable(int cpu, bool enable)
{}
EXPORT_SYMBOL_GPL();

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
/* NOTE(review): body elided in this view. */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{}
EXPORT_SYMBOL_GPL();

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for per_cpu().
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 *
 * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
 * then assume there is no latency.
 */
/* NOTE(review): body elided in this view. */
unsigned int cppc_get_transition_latency(int cpu_num)
{}
EXPORT_SYMBOL_GPL();

/*
 * NOTE(review): the two DMI #define expansions below are empty in this
 * view — they look elided; confirm the real values before relying on them.
 */

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{}

/*
 * If CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa. The conversion is
 * extrapolated as an affine function passing by the 2 points:
 *  - (Low perf, Low freq)
 *  - (Nominal perf, Nominal freq)
 */
/* Convert an abstract perf value to kHz. Body elided in this view. */
unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
{}
EXPORT_SYMBOL_GPL();

/* Convert a frequency in kHz to an abstract perf value. Body elided in view. */
unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
{}
EXPORT_SYMBOL_GPL();