linux/drivers/cpufreq/intel_pstate.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <[email protected]>
 */

#define pr_fmt(fmt)	"intel_pstate: " fmt	/* assumed prefix; the value was elided here */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched/cpufreq.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <linux/pm_qos.h>
#include <linux/bitfield.h>
#include <trace/events/power.h>

#include <asm/cpu.h>
#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#include "../drivers/thermal/intel/thermal_interrupt.h"

#define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP	5000
#define INTEL_CPUFREQ_TRANSITION_DELAY		500

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((s64)(X) << EXT_FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	return fp_toint(x) + !!(x & ((1 << FRAC_BITS) - 1));
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}
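
/*
 * Illustrative sketch, not part of the driver proper: with FRAC_BITS == 8,
 * the helpers above implement 24.8 fixed-point arithmetic in which 1.0 is
 * stored as 256.  The hypothetical function below only demonstrates how
 * int_tofp(), div_fp(), mul_fp() and fp_toint() compose: 75% of 2000 is
 * computed without floating point and comes out as 1500.
 */
static inline int fixed_point_example(void)
{
	int32_t three_quarters = div_fp(int_tofp(3), int_tofp(4)); /* 0.75 == 192 */
	int32_t scaled = mul_fp(three_quarters, int_tofp(2000));

	return fp_toint(scaled); /* 1500 */
}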

/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate the next
 *			P state. This can differ from core_avg_perf to
 *			account for CPU idle periods.
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing the next P state.
 */
struct sample {};
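
/*
 * Sketch of how a sample is reduced to @core_avg_perf, assuming the EXT
 * fixed-point helpers above (hypothetical stand-alone helper; in the driver
 * this reduction is done from struct sample by intel_pstate_calc_avg_perf()):
 * the ratio of the APERF and MPERF deltas is the average performance over
 * the sampling window, with 1.0 meaning the MPERF reference frequency.
 */
static inline u64 example_core_avg_perf(u64 aperf_delta, u64 mperf_delta)
{
	return div_ext_fp(aperf_delta, mperf_delta);
}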

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: Physical max P state for a processor.
 *			This can be higher than max_pstate, which can be
 *			limited by platform thermal design power limits.
 * @perf_ctl_scaling:	PERF_CTL P-state to frequency scaling factor
 * @scaling:		Scaling factor between performance and frequency
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @min_freq:		@min_pstate frequency in cpufreq units
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per-CPU P state limits and the current P state.
 */
struct pstate_data {};
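
/*
 * Sketch of the P-state/frequency relationship implied by the fields above,
 * assuming @scaling is in kHz per unit of P-state ratio (hypothetical
 * helpers; e.g. a scaling factor of 100000 maps ratio 20 to 2 GHz).
 */
static inline int example_pstate_to_freq(int pstate, int scaling)
{
	return pstate * scaling;		/* kHz */
}

static inline int example_freq_to_pstate(int freq, int scaling)
{
	return DIV_ROUND_UP(freq, scaling);	/* lowest ratio covering freq */
}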

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - min P state)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used on Atom platforms, where in addition to the target
 * P state, voltage data needs to be specified to select the next P state.
 */
struct vid_data {};
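
/*
 * Sketch of how @ratio is applied, assuming fixed-point VID values as
 * documented above (hypothetical helper; the driver performs this kind of
 * linear interpolation when encoding a P-state request on Atom): the VID
 * for a P-state is interpolated between the min and max VID values.
 */
static inline int32_t example_vid_for_pstate(int32_t vid_min, int32_t vid_max,
					     int32_t ratio, int pstate,
					     int min_pstate)
{
	int32_t vid_fp = vid_min +
			 mul_fp(int_tofp(pstate - min_pstate), ratio);

	return clamp_t(int32_t, vid_fp, vid_min, vid_max);
}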

/**
 * struct global_params - Global parameters, mostly tunable via sysfs.
 * @no_turbo:		Whether or not to use turbo P-states.
 * @turbo_disabled:	Whether or not turbo P-states are available at all,
 *			based on the MSR_IA32_MISC_ENABLE value and whether or
 *			not the maximum reported turbo P-state is different from
 *			the maximum reported non-turbo one.
 * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 */
struct global_params {};
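
/*
 * Sketch of how the percent tunables above translate into P-state limits,
 * assuming they are interpreted as a fraction of the maximum turbo P-state
 * (hypothetical helper; the driver itself does this with fixed-point math
 * and additional per-policy clamping).
 */
static inline int example_pct_to_pstate(int pct, int turbo_pstate)
{
	return DIV_ROUND_UP(pct * turbo_pstate, 100);
}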

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @last_sample_time:	Last Sample time
 * @aperf_mperf_shift:	APERF vs MPERF counting frequency difference
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @sample:		Storage for the last sample data
 * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
 * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @epp_powersave:	Last HWP energy performance preference (EPP)
 *			or energy performance bias (EPB) value, saved
 *			when the policy is switched to performance
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power on default HWP energy performance
 *			preference/bias
 * @epp_cached:		Cached HWP energy-performance preference value
 * @hwp_req_cached:	Cached value of the last HWP Request MSR
 * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
 * @last_io_update:	Last time when IO wake flag was set
 * @sched_flags:	Store scheduler flags for possible cross CPU update
 * @hwp_boost_min:	Last HWP boosted min performance
 * @suspended:		Whether or not the driver has been suspended.
 * @hwp_notify_work:	workqueue for HWP notifications.
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {};
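
/*
 * Sketch of the MSR caching pattern that @hwp_req_cached supports, using
 * the standard MSR accessors (hypothetical helper): the HWP Request MSR is
 * read once and cached, so later limit updates can adjust individual fields
 * without re-reading the register on every pass.
 */
static inline u64 example_read_hwp_req(unsigned int cpu)
{
	u64 value;

	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
	return value;
}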

static struct cpudata **all_cpu_data;

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_cpu_scaling:	Get frequency scaling factor for a given cpu
 * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 *
 * Core and Atom CPU models use different methods to get P state limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {};

static struct pstate_funcs pstate_funcs __read_mostly;
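
/*
 * Sketch of how a per-model callback table is assembled from the helpers
 * declared later in this file (for illustration only; the actual
 * initializers live in core_funcs and friends below, and the field names
 * are taken from the kernel-doc above).
 */
#if 0
static const struct pstate_funcs example_funcs = {
	.get_max		= core_get_max_pstate,
	.get_max_physical	= core_get_max_pstate_physical,
	.get_min		= core_get_min_pstate,
	.get_turbo		= core_get_turbo_pstate,
	.get_scaling		= core_get_scaling,
	.get_val		= core_get_val,
};
#endif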

static bool hwp_active __ro_after_init;
static int hwp_mode_bdw __ro_after_init;
static bool per_cpu_limits __ro_after_init;
static bool hwp_forced __ro_after_init;
static bool hwp_boost __read_mostly;

static struct cpufreq_driver *intel_pstate_driver __read_mostly;

#define HYBRID_SCALING_FACTOR		78741
#define HYBRID_SCALING_FACTOR_MTL	80000
#define HYBRID_SCALING_FACTOR_LNL	86957

static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;

static inline int core_get_scaling(void)
{
	return 100000;
}

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct global_params global;

static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_acpi_pm_profile_server(void)
{}

static bool intel_pstate_get_ppc_enable_status(void)
{}

#ifdef CONFIG_ACPI_CPPC_LIB

/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
{}

static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);

#define CPPC_MAX_PERF	U8_MAX

static void intel_pstate_set_itmt_prio(int cpu)
{}

static int intel_pstate_get_cppc_guaranteed(int cpu)
{}

static int intel_pstate_cppc_get_scaling(int cpu)
{}

#else /* CONFIG_ACPI_CPPC_LIB */
static inline void intel_pstate_set_itmt_prio(int cpu)
{
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{}
#else /* CONFIG_ACPI */
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}

static inline bool intel_pstate_acpi_pm_profile_server(void)
{
	return false;
}
#endif /* CONFIG_ACPI */

#ifndef CONFIG_ACPI_CPPC_LIB
static inline int intel_pstate_get_cppc_guaranteed(int cpu)
{
	return -ENOTSUPP;
}

static int intel_pstate_cppc_get_scaling(int cpu)
{
	return core_get_scaling();
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq,
					unsigned int relation)
{}

static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq)
{}

/**
 * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
 * @cpu: Target CPU.
 *
 * On hybrid processors, HWP may expose more performance levels than there are
 * P-states accessible through the PERF_CTL interface.  If that happens, the
 * scaling factor between HWP performance levels and CPU frequency will be less
 * than the scaling factor between P-state values and CPU frequency.
 *
 * In that case, adjust the CPU parameters used in computations accordingly.
 */
static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
{}
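
/*
 * Sketch of the rescaling described above, assuming both scaling factors
 * are in kHz per performance-level unit (hypothetical helper): when HWP
 * exposes more levels than PERF_CTL, the same frequency corresponds to a
 * higher HWP level, so PERF_CTL-derived levels are converted via frequency.
 */
static inline int example_perf_ctl_to_hwp(int perf_ctl_pstate,
					  int perf_ctl_scaling,
					  int hwp_scaling)
{
	return DIV_ROUND_UP(perf_ctl_pstate * perf_ctl_scaling, hwp_scaling);
}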

static bool turbo_is_disabled(void)
{}

static int min_perf_pct_min(void)
{}

static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{}

static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{}

static int intel_pstate_set_epb(int cpu, s16 pref)
{}

/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[]
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */

enum energy_perf_value_index {};

static const char * const energy_perf_strings[] =;
static unsigned int epp_values[] =;
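
/*
 * Sketch of the index-to-EPP mapping implied by the table above, assuming
 * the HWP_EPP_* constants from <asm/msr-index.h> (hypothetical helper with
 * a hypothetical value table; index 0, "default", is left as zero here and
 * is resolved from the firmware default at run time by the driver).
 */
static inline u32 example_index_to_epp(unsigned int index)
{
	static const u32 example_epp_values[] = {
		[1] = HWP_EPP_PERFORMANCE,		/* 0x00 */
		[2] = HWP_EPP_BALANCE_PERFORMANCE,	/* 0x80 */
		[3] = HWP_EPP_BALANCE_POWERSAVE,	/* 0xc0 */
		[4] = HWP_EPP_POWERSAVE,		/* 0xff */
	};

	return index < ARRAY_SIZE(example_epp_values) ?
	       example_epp_values[index] : 0;
}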

static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
{}

static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
{}

static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
					      int pref_index, bool use_raw,
					      u32 raw_epp)
{}

static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
{}

cpufreq_freq_attr_ro(energy_performance_available_preferences);

static struct cpufreq_driver intel_pstate;

static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
{}

static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
{}

cpufreq_freq_attr_rw(energy_performance_preference);

static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
{}

cpufreq_freq_attr_ro(base_frequency);

static struct freq_attr *hwp_cpufreq_attrs[] =;

static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{}

static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
{}

static void intel_pstate_hwp_set(unsigned int cpu)
{}

static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);

static void intel_pstate_hwp_offline(struct cpudata *cpu)
{}

#define POWER_CTL_EE_ENABLE	1
#define POWER_CTL_EE_DISABLE	2

static int power_ctl_ee_state;

static void set_power_ctl_ee_state(bool input)
{}

static void intel_pstate_hwp_enable(struct cpudata *cpudata);

static void intel_pstate_hwp_reenable(struct cpudata *cpu)
{}

static int intel_pstate_suspend(struct cpufreq_policy *policy)
{}

static int intel_pstate_resume(struct cpufreq_policy *policy)
{}

static void intel_pstate_update_policies(void)
{}

static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
					   struct cpufreq_policy *policy)
{}

static void intel_pstate_update_limits(unsigned int cpu)
{}

static void intel_pstate_update_limits_for_all(void)
{}

/************************** sysfs begin ************************/
#define show_one(file_name, object)

static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);

static ssize_t show_status(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{}

static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
			    const char *buf, size_t count)
{}

static ssize_t show_turbo_pct(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct kobj_attribute *attr, char *buf)
{}

static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
			      const char *buf, size_t count)
{}

static void update_qos_request(enum freq_qos_req_type type)
{}

static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
				  const char *buf, size_t count)
{}

static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
				  const char *buf, size_t count)
{}

static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{}

static ssize_t store_hwp_dynamic_boost(struct kobject *a,
				       struct kobj_attribute *b,
				       const char *buf, size_t count)
{}

static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
				      char *buf)
{}

static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
				       const char *buf, size_t count)
{}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(status);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
define_one_global_rw(hwp_dynamic_boost);
define_one_global_rw(energy_efficiency);

static struct attribute *intel_pstate_attributes[] =;

static const struct attribute_group intel_pstate_attr_group =;

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];

static struct kobject *intel_pstate_kobject;

static void __init intel_pstate_sysfs_expose_params(void)
{}

static void __init intel_pstate_sysfs_remove(void)
{}

static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
{}

static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
{}

/************************** sysfs end ************************/

static void intel_pstate_notify_work(struct work_struct *work)
{}

static DEFINE_SPINLOCK(hwp_notify_lock);
static cpumask_t hwp_intr_enable_mask;

#define HWP_GUARANTEED_PERF_CHANGE_STATUS	BIT(0)
#define HWP_HIGHEST_PERF_CHANGE_STATUS		BIT(3)

void notify_hwp_interrupt(void)
{}

static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
{}

#define HWP_GUARANTEED_PERF_CHANGE_REQ	BIT(0)
#define HWP_HIGHEST_PERF_CHANGE_REQ	BIT(2)

static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
{}

static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
{}

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{}

static int atom_get_min_pstate(int not_used)
{}

static int atom_get_max_pstate(int not_used)
{}

static int atom_get_turbo_pstate(int not_used)
{}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{}

static int silvermont_get_scaling(void)
{}

static int airmont_get_scaling(void)
{}

static void atom_get_vid(struct cpudata *cpudata)
{}

static int core_get_min_pstate(int cpu)
{}

static int core_get_max_pstate_physical(int cpu)
{}

static int core_get_tdp_ratio(int cpu, u64 plat_info)
{}

static int core_get_max_pstate(int cpu)
{}

static int core_get_turbo_pstate(int cpu)
{}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{}

static int knl_get_aperf_mperf_shift(void)
{
	return 10;
}

static int knl_get_turbo_pstate(int cpu)
{}

static void hybrid_get_type(void *data)
{}

static int hwp_get_cpu_scaling(int cpu)
{}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{}

/*
 * A long hold time will keep high perf limits in place for a long time,
 * which negatively impacts perf/watt for some workloads, like SPECpower.
 * 3ms is based on experiments on some workloads.
 */
static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
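
/*
 * Sketch of how the hold time above gates de-boosting, given update
 * timestamps in nanoseconds (hypothetical helper; the real decision is
 * made in the HWP boost-down path using the per-CPU timestamps).
 */
static inline bool example_boost_hold_expired(u64 now, u64 last_boost_time)
{
	return now - last_boost_time > hwp_boost_hold_time_ns;
}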

static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
{}

static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
{}

static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
						      u64 time)
{}

static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
						u64 time, unsigned int flags)
{}

static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{}

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{}

static inline int32_t get_target_pstate(struct cpudata *cpu)
{}

static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{}

static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{}

static void intel_pstate_adjust_pstate(struct cpudata *cpu)
{}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned int flags)
{}

static struct pstate_funcs core_funcs =;

static const struct pstate_funcs silvermont_funcs =;

static const struct pstate_funcs airmont_funcs =;

static const struct pstate_funcs knl_funcs =;

#define X86_MATCH(vfm, policy)

static const struct x86_cpu_id intel_pstate_cpu_ids[] =;
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

#ifdef CONFIG_ACPI
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst =;
#endif

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] =;

static int intel_pstate_init_cpu(unsigned int cpunum)
{}

static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{}

static int intel_pstate_get_max_freq(struct cpudata *cpu)
{}

static void intel_pstate_update_perf_limits(struct cpudata *cpu,
					    unsigned int policy_min,
					    unsigned int policy_max)
{}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{}

static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
					   struct cpufreq_policy_data *policy)
{}

static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
					   struct cpufreq_policy_data *policy)
{}

static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
{}

static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy)
{}

static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
{}

static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
{}

static void intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{}

static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{}

static struct cpufreq_driver intel_pstate =;

static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{}

/* Use of trace in passive mode:
 *
 * In passive mode the trace core_busy field (also known as the
 * performance field, and labelled as such on the graphs; also known as
 * core_avg_perf) is not needed and so is re-assigned to indicate if the
 * driver call was via the normal or fast switch path. Various graphs
 * output from the intel_pstate_tracer.py utility that include core_busy
 * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
 * so we use 10 to indicate the normal path through the driver, and
 * 90 to indicate the fast switch path through the driver.
 * The scaled_busy field is not used, and is set to 0.
 */

#define INTEL_PSTATE_TRACE_TARGET	10
#define INTEL_PSTATE_TRACE_FAST_SWITCH	90
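
/*
 * Sketch of the convention described above (hypothetical helper): the
 * trace core_busy field is overloaded with one of the two markers below
 * depending on which driver path requested the P-state change.
 */
static inline unsigned int example_trace_type(bool fast_switch)
{
	return fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :	/* 90 */
			     INTEL_PSTATE_TRACE_TARGET;		/* 10 */
}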

static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
{}

static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
				     u32 desired, bool fast_switch)
{}

static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
					  u32 target_pstate, bool fast_switch)
{}

static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
				       int target_pstate, bool fast_switch)
{}

static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{}

static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{}

static void intel_cpufreq_adjust_perf(unsigned int cpunum,
				      unsigned long min_perf,
				      unsigned long target_perf,
				      unsigned long capacity)
{}

static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{}

static void intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{}

static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
{}

static struct cpufreq_driver intel_cpufreq =;

static struct cpufreq_driver *default_driver;

static void intel_pstate_driver_cleanup(void)
{}

static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{}

static ssize_t intel_pstate_show_status(char *buf)
{}

static int intel_pstate_update_status(const char *buf, size_t size)
{}

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

static int __init intel_pstate_msrs_not_valid(void)
{}

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{}

#ifdef CONFIG_ACPI

static bool __init intel_pstate_no_acpi_pss(void)
{}

static bool __init intel_pstate_no_acpi_pcch(void)
{}

static bool __init intel_pstate_has_acpi_ppc(void)
{}

enum {};

/* Hardware vendor-specific info that has its own power management modes */
static struct acpi_platform_list plat_info[] __initdata =;

#define BITMASK_OOB

static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{}

static void intel_pstate_request_control_from_smm(void)
{}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */

#define INTEL_PSTATE_HWP_BROADWELL	0x01

#define X86_MATCH_HWP(vfm, hwp_mode)

static const struct x86_cpu_id hwp_support_ids[] __initconst =;

static bool intel_pstate_hwp_is_enabled(void)
{}

#define POWERSAVE_MASK
#define BALANCE_POWER_MASK
#define BALANCE_PERFORMANCE_MASK
#define PERFORMANCE_MASK

#define HWP_SET_EPP_VALUES(powersave, balance_power, balance_perf, performance)

#define HWP_SET_DEF_BALANCE_PERF_EPP(balance_perf)

static const struct x86_cpu_id intel_epp_default[] =;

static const struct x86_cpu_id intel_hybrid_scaling_factor[] =;

static int __init intel_pstate_init(void)
{}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <[email protected]>");
MODULE_DESCRIPTION("P-state driver for Intel Core processors");