/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * CPPC (Collaborative Processor Performance Control) methods used
 * by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 */

#ifndef _CPPC_ACPI_H
#define _CPPC_ACPI_H

#include <linux/acpi.h>
#include <linux/cpufreq.h>
#include <linux/types.h>

#include <acpi/pcc.h>
#include <acpi/processor.h>

/* CPPCv2 and CPPCv3 support */
#define CPPC_V2_REV	2
#define CPPC_V3_REV	3
#define CPPC_V2_NUM_ENT	21
#define CPPC_V3_NUM_ENT	23

#define PCC_CMD_COMPLETE_MASK	(1 << 0)
#define PCC_ERROR_MASK		(1 << 2)
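/*
 * Illustrative sketch, not the in-kernel implementation: these masks apply
 * to the status field of the PCC shared memory region described by the
 * PCCT.  Assuming "status" holds a snapshot of that field, a caller
 * waiting for the platform to finish the last command could check:
 *
 *	if (status & PCC_ERROR_MASK)
 *		return -EIO;
 *	if (status & PCC_CMD_COMPLETE_MASK)
 *		return 0;
 */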
#define MAX_CPC_REG_ENT 21

/* CPPC specific PCC commands. */
#define CMD_READ 0
#define CMD_WRITE 1

/* Each register has the following format. */
struct cpc_reg {
	u8 descriptor;
	u16 length;
	u8 space_id;
	u8 bit_width;
	u8 bit_offset;
	u8 access_width;
	u64 address;
} __packed;

/*
 * Each entry in the CPC table is either
 * of type ACPI_TYPE_BUFFER or
 * ACPI_TYPE_INTEGER.
 */
struct cpc_register_resource {
	acpi_object_type type;
	u64 __iomem *sys_mem_vaddr;
	union {
		struct cpc_reg reg;
		u64 int_value;
	} cpc_entry;
};

/* Container to hold the CPC details for each CPU */
struct cpc_desc {
	int num_entries;
	int version;
	int cpu_id;
	int write_cmd_status;
	int write_cmd_id;
	struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT];
	struct acpi_psd_package domain_info;
	struct kobject kobj;
};

/* These are indexes into the per-cpu cpc_regs[]. Order is important. */
enum cppc_regs {
	HIGHEST_PERF,
	NOMINAL_PERF,
	LOW_NON_LINEAR_PERF,
	LOWEST_PERF,
	GUARANTEED_PERF,
	DESIRED_PERF,
	MIN_PERF,
	MAX_PERF,
	PERF_REDUC_TOLERANCE,
	TIME_WINDOW,
	CTR_WRAP_TIME,
	REFERENCE_CTR,
	DELIVERED_CTR,
	PERF_LIMITED,
	ENABLE,
	AUTO_SEL_ENABLE,
	AUTO_ACT_WINDOW,
	ENERGY_PERF,
	REFERENCE_PERF,
	LOWEST_FREQ,
	NOMINAL_FREQ,
};
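/*
 * Usage sketch, assuming a hypothetical "cpc" pointer to a CPU's
 * struct cpc_desc: the enum values above index directly into cpc_regs[],
 * so the register resource describing e.g. the desired performance
 * register is simply:
 *
 *	struct cpc_register_resource *desired_reg;
 *
 *	desired_reg = &cpc->cpc_regs[DESIRED_PERF];
 */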
/*
 * Categorization of registers as described
 * in the ACPI v.5.1 spec.
 * XXX: Only filling up ones which are used by governors
 * today.
 */
struct cppc_perf_caps {
	u32 guaranteed_perf;
	u32 highest_perf;
	u32 nominal_perf;
	u32 lowest_perf;
	u32 lowest_nonlinear_perf;
	u32 lowest_freq;
	u32 nominal_freq;
	u32 energy_perf;
	bool auto_sel;
};

struct cppc_perf_ctrls {
	u32 max_perf;
	u32 min_perf;
	u32 desired_perf;
	u32 energy_perf;
};

struct cppc_perf_fb_ctrs {
	u64 reference;
	u64 delivered;
	u64 reference_perf;
	u64 wraparound_time;
};
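/*
 * Sketch of how these feedback counters are typically used (illustrative
 * only): given two snapshots t0 and t1 taken via cppc_get_perf_ctrs(),
 * the average delivered performance over that window is
 *
 *	delta_ref = t1.reference - t0.reference;
 *	delta_del = t1.delivered - t0.delivered;
 *	if (delta_ref)
 *		perf = div64_u64(t0.reference_perf * delta_del, delta_ref);
 */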
/* Per CPU container for runtime CPPC management. */
struct cppc_cpudata {
	struct list_head node;
	struct cppc_perf_caps perf_caps;
	struct cppc_perf_ctrls perf_ctrls;
	struct cppc_perf_fb_ctrs perf_fb_ctrs;
	unsigned int shared_type;
	cpumask_var_t shared_cpu_map;
};
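/*
 * Minimal allocation sketch, assuming a hypothetical "cpu" number and
 * GFP_KERNEL context.  The cppc_cpufreq driver keeps one such structure
 * per policy (a pointer is stored in policy->driver_data) and uses
 * acpi_get_psd_map() to fill in shared_type and shared_cpu_map:
 *
 *	struct cppc_cpudata *cpu_data;
 *
 *	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
 *	if (!cpu_data || !zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
 *		return -ENOMEM;
 *	ret = acpi_get_psd_map(cpu, cpu_data);
 */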
#ifdef CONFIG_ACPI_CPPC_LIB
extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf);
extern int cppc_get_highest_perf(int cpunum, u64 *highest_perf);
extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
extern bool cppc_perf_ctrs_in_pcc(void);
extern unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf);
extern unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq);
extern bool acpi_cpc_valid(void);
extern bool cppc_allow_fast_switch(void);
extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
extern unsigned int cppc_get_transition_latency(int cpu);
extern bool cpc_ffh_supported(void);
extern bool cpc_supported_by_cpu(void);
extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val);
extern int cppc_get_epp_perf(int cpunum, u64 *epp_perf);
extern int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable);
extern int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps);
extern int cppc_set_auto_sel(int cpu, bool enable);
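/*
 * End-to-end usage sketch (illustrative only, error handling trimmed):
 * a cpufreq-style driver queries the capabilities once and then requests
 * a desired performance level within [lowest_perf, highest_perf].  "cpu"
 * is a hypothetical logical CPU number:
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {};
 *
 *	if (cppc_get_perf_caps(cpu, &caps))
 *		return -ENODEV;
 *	ctrls.desired_perf = caps.nominal_perf;
 *	if (cppc_set_perf(cpu, &ctrls))
 *		return -EIO;
 */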
#else /* !CONFIG_ACPI_CPPC_LIB */
static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	return -ENOTSUPP;
}

static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
	return -ENOTSUPP;
}

static inline int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
	return -ENOTSUPP;
}

static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	return -ENOTSUPP;
}

static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	return -ENOTSUPP;
}

static inline int cppc_set_enable(int cpu, bool enable)
{
	return -ENOTSUPP;
}

static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
{
	return -ENOTSUPP;
}

static inline bool cppc_perf_ctrs_in_pcc(void)
{
	return false;
}

static inline bool acpi_cpc_valid(void)
{
	return false;
}

static inline bool cppc_allow_fast_switch(void)
{
	return false;
}

static inline unsigned int cppc_get_transition_latency(int cpu)
{
	return CPUFREQ_ETERNAL;
}

static inline bool cpc_ffh_supported(void)
{
	return false;
}

static inline int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

static inline int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

static inline int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
{
	return -ENOTSUPP;
}

static inline int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
	return -ENOTSUPP;
}

static inline int cppc_set_auto_sel(int cpu, bool enable)
{
	return -ENOTSUPP;
}

static inline int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	return -ENOTSUPP;
}
#endif /* !CONFIG_ACPI_CPPC_LIB */

#endif /* _CPPC_ACPI_H */