soc: qcom: Separate kryo l2 accessors from PMU driver

The driver provides kernel level API for other drivers
to access the MSM8996 L2 cache registers.
Separate the L2 access code from the PMU driver and
make it public to allow other drivers to use it.
The accesses must be serialized by a single spinlock,
maintained in this driver.

Signed-off-by: Ilia Lin <ilialin@codeaurora.org>
Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
Link: https://lore.kernel.org/r/1593766185-16346-2-git-send-email-loic.poulain@linaro.org
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
This commit is contained in:
Ilia Lin 2020-07-03 10:49:41 +02:00 committed by Stephen Boyd
parent dbb988b4e7
commit 6d0efeb14b
6 changed files with 99 additions and 66 deletions

View File

@ -82,6 +82,7 @@ config FSL_IMX8_DDR_PMU
config QCOM_L2_PMU config QCOM_L2_PMU
bool "Qualcomm Technologies L2-cache PMU" bool "Qualcomm Technologies L2-cache PMU"
depends on ARCH_QCOM && ARM64 && ACPI depends on ARCH_QCOM && ARM64 && ACPI
select QCOM_KRYO_L2_ACCESSORS
help help
Provides support for the L2 cache performance monitor unit (PMU) Provides support for the L2 cache performance monitor unit (PMU)
in Qualcomm Technologies processors. in Qualcomm Technologies processors.

View File

@ -23,6 +23,7 @@
#include <asm/barrier.h> #include <asm/barrier.h>
#include <asm/local64.h> #include <asm/local64.h>
#include <asm/sysreg.h> #include <asm/sysreg.h>
#include <soc/qcom/kryo-l2-accessors.h>
#define MAX_L2_CTRS 9 #define MAX_L2_CTRS 9
@ -79,8 +80,6 @@
#define L2_COUNTER_RELOAD BIT_ULL(31) #define L2_COUNTER_RELOAD BIT_ULL(31)
#define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63) #define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63)
#define L2CPUSRSELR_EL1 sys_reg(3, 3, 15, 0, 6)
#define L2CPUSRDR_EL1 sys_reg(3, 3, 15, 0, 7)
#define reg_idx(reg, i) (((i) * IA_L2_REG_OFFSET) + reg##_BASE) #define reg_idx(reg, i) (((i) * IA_L2_REG_OFFSET) + reg##_BASE)
@ -99,48 +98,7 @@
#define L2_EVENT_STREX 0x421 #define L2_EVENT_STREX 0x421
#define L2_EVENT_CLREX 0x422 #define L2_EVENT_CLREX 0x422
static DEFINE_RAW_SPINLOCK(l2_access_lock);
/**
* set_l2_indirect_reg: write value to an L2 register
* @reg: Address of L2 register.
* @value: Value to be written to register.
*
* Use architecturally required barriers for ordering between system register
* accesses
*/
static void set_l2_indirect_reg(u64 reg, u64 val)
{
unsigned long flags;
raw_spin_lock_irqsave(&l2_access_lock, flags);
write_sysreg_s(reg, L2CPUSRSELR_EL1);
isb();
write_sysreg_s(val, L2CPUSRDR_EL1);
isb();
raw_spin_unlock_irqrestore(&l2_access_lock, flags);
}
/**
* get_l2_indirect_reg: read an L2 register value
* @reg: Address of L2 register.
*
* Use architecturally required barriers for ordering between system register
* accesses
*/
static u64 get_l2_indirect_reg(u64 reg)
{
u64 val;
unsigned long flags;
raw_spin_lock_irqsave(&l2_access_lock, flags);
write_sysreg_s(reg, L2CPUSRSELR_EL1);
isb();
val = read_sysreg_s(L2CPUSRDR_EL1);
raw_spin_unlock_irqrestore(&l2_access_lock, flags);
return val;
}
struct cluster_pmu; struct cluster_pmu;
@ -211,28 +169,28 @@ static inline struct cluster_pmu *get_cluster_pmu(
static void cluster_pmu_reset(void) static void cluster_pmu_reset(void)
{ {
/* Reset all counters */ /* Reset all counters */
set_l2_indirect_reg(L2PMCR, L2PMCR_RESET_ALL); kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
set_l2_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask); kryo_l2_set_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask);
set_l2_indirect_reg(L2PMINTENCLR, l2_counter_present_mask); kryo_l2_set_indirect_reg(L2PMINTENCLR, l2_counter_present_mask);
set_l2_indirect_reg(L2PMOVSCLR, l2_counter_present_mask); kryo_l2_set_indirect_reg(L2PMOVSCLR, l2_counter_present_mask);
} }
static inline void cluster_pmu_enable(void) static inline void cluster_pmu_enable(void)
{ {
set_l2_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE); kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE);
} }
static inline void cluster_pmu_disable(void) static inline void cluster_pmu_disable(void)
{ {
set_l2_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE); kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE);
} }
static inline void cluster_pmu_counter_set_value(u32 idx, u64 value) static inline void cluster_pmu_counter_set_value(u32 idx, u64 value)
{ {
if (idx == l2_cycle_ctr_idx) if (idx == l2_cycle_ctr_idx)
set_l2_indirect_reg(L2PMCCNTR, value); kryo_l2_set_indirect_reg(L2PMCCNTR, value);
else else
set_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value); kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value);
} }
static inline u64 cluster_pmu_counter_get_value(u32 idx) static inline u64 cluster_pmu_counter_get_value(u32 idx)
@ -240,46 +198,46 @@ static inline u64 cluster_pmu_counter_get_value(u32 idx)
u64 value; u64 value;
if (idx == l2_cycle_ctr_idx) if (idx == l2_cycle_ctr_idx)
value = get_l2_indirect_reg(L2PMCCNTR); value = kryo_l2_get_indirect_reg(L2PMCCNTR);
else else
value = get_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx)); value = kryo_l2_get_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx));
return value; return value;
} }
static inline void cluster_pmu_counter_enable(u32 idx) static inline void cluster_pmu_counter_enable(u32 idx)
{ {
set_l2_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx)); kryo_l2_set_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx));
} }
static inline void cluster_pmu_counter_disable(u32 idx) static inline void cluster_pmu_counter_disable(u32 idx)
{ {
set_l2_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx)); kryo_l2_set_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx));
} }
static inline void cluster_pmu_counter_enable_interrupt(u32 idx) static inline void cluster_pmu_counter_enable_interrupt(u32 idx)
{ {
set_l2_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx)); kryo_l2_set_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx));
} }
static inline void cluster_pmu_counter_disable_interrupt(u32 idx) static inline void cluster_pmu_counter_disable_interrupt(u32 idx)
{ {
set_l2_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx)); kryo_l2_set_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx));
} }
static inline void cluster_pmu_set_evccntcr(u32 val) static inline void cluster_pmu_set_evccntcr(u32 val)
{ {
set_l2_indirect_reg(L2PMCCNTCR, val); kryo_l2_set_indirect_reg(L2PMCCNTCR, val);
} }
static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val) static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val)
{ {
set_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val); kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val);
} }
static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val) static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val)
{ {
set_l2_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val); kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val);
} }
static void cluster_pmu_set_resr(struct cluster_pmu *cluster, static void cluster_pmu_set_resr(struct cluster_pmu *cluster,
@ -295,11 +253,11 @@ static void cluster_pmu_set_resr(struct cluster_pmu *cluster,
spin_lock_irqsave(&cluster->pmu_lock, flags); spin_lock_irqsave(&cluster->pmu_lock, flags);
resr_val = get_l2_indirect_reg(L2PMRESR); resr_val = kryo_l2_get_indirect_reg(L2PMRESR);
resr_val &= ~(L2PMRESR_GROUP_MASK << shift); resr_val &= ~(L2PMRESR_GROUP_MASK << shift);
resr_val |= field; resr_val |= field;
resr_val |= L2PMRESR_EN; resr_val |= L2PMRESR_EN;
set_l2_indirect_reg(L2PMRESR, resr_val); kryo_l2_set_indirect_reg(L2PMRESR, resr_val);
spin_unlock_irqrestore(&cluster->pmu_lock, flags); spin_unlock_irqrestore(&cluster->pmu_lock, flags);
} }
@ -315,14 +273,14 @@ static inline void cluster_pmu_set_evfilter_sys_mode(u32 ctr)
L2PMXEVFILTER_ORGFILTER_IDINDEP | L2PMXEVFILTER_ORGFILTER_IDINDEP |
L2PMXEVFILTER_ORGFILTER_ALL; L2PMXEVFILTER_ORGFILTER_ALL;
set_l2_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val); kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val);
} }
static inline u32 cluster_pmu_getreset_ovsr(void) static inline u32 cluster_pmu_getreset_ovsr(void)
{ {
u32 result = get_l2_indirect_reg(L2PMOVSSET); u32 result = kryo_l2_get_indirect_reg(L2PMOVSSET);
set_l2_indirect_reg(L2PMOVSCLR, result); kryo_l2_set_indirect_reg(L2PMOVSCLR, result);
return result; return result;
} }
@ -767,7 +725,7 @@ static int get_num_counters(void)
{ {
int val; int val;
val = get_l2_indirect_reg(L2PMCR); val = kryo_l2_get_indirect_reg(L2PMCR);
/* /*
* Read number of counters from L2PMCR and add 1 * Read number of counters from L2PMCR and add 1

View File

@ -53,6 +53,10 @@ config QCOM_LLCC
SDM845. This provides interfaces to clients that use the LLCC. SDM845. This provides interfaces to clients that use the LLCC.
Say yes here to enable LLCC slice driver. Say yes here to enable LLCC slice driver.
config QCOM_KRYO_L2_ACCESSORS
bool
depends on ARCH_QCOM && ARM64 || COMPILE_TEST
config QCOM_MDT_LOADER config QCOM_MDT_LOADER
tristate tristate
select QCOM_SCM select QCOM_SCM

View File

@ -24,3 +24,4 @@ obj-$(CONFIG_QCOM_APR) += apr.o
obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o
obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o

View File

@ -0,0 +1,57 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <asm/sysreg.h>
#include <soc/qcom/kryo-l2-accessors.h>
#define L2CPUSRSELR_EL1 sys_reg(3, 3, 15, 0, 6)
#define L2CPUSRDR_EL1 sys_reg(3, 3, 15, 0, 7)
static DEFINE_RAW_SPINLOCK(l2_access_lock);
/**
 * kryo_l2_set_indirect_reg() - write value to an L2 register
 * @reg: Address of L2 register.
 * @val: Value to be written to register.
 *
 * Writes are done indirectly: the target register address is first
 * programmed into the selector register (L2CPUSRSELR_EL1), then the
 * value is written to the data register (L2CPUSRDR_EL1). The isb()
 * after each system-register write is the architecturally required
 * barrier that orders the selector write before the data write (and
 * the data write before any subsequent access). The whole sequence is
 * serialized by l2_access_lock with interrupts disabled so a
 * concurrent accessor (or an interrupt handler) cannot retarget the
 * selector between the two writes.
 */
void kryo_l2_set_indirect_reg(u64 reg, u64 val)
{
	unsigned long flags;
	raw_spin_lock_irqsave(&l2_access_lock, flags);
	write_sysreg_s(reg, L2CPUSRSELR_EL1);
	isb();
	write_sysreg_s(val, L2CPUSRDR_EL1);
	isb();
	raw_spin_unlock_irqrestore(&l2_access_lock, flags);
}
EXPORT_SYMBOL(kryo_l2_set_indirect_reg);
/**
 * kryo_l2_get_indirect_reg() - read an L2 register value
 * @reg: Address of L2 register.
 *
 * Reads are done indirectly: the target register address is programmed
 * into the selector register (L2CPUSRSELR_EL1), then the value is read
 * back from the data register (L2CPUSRDR_EL1). The isb() between the
 * two accesses is the architecturally required barrier that ensures
 * the selector write has taken effect before the data read. The
 * select-then-read pair is serialized by l2_access_lock with
 * interrupts disabled so no other accessor can retarget the selector
 * in between.
 *
 * Return: the value read from the selected L2 register.
 */
u64 kryo_l2_get_indirect_reg(u64 reg)
{
	u64 val;
	unsigned long flags;
	raw_spin_lock_irqsave(&l2_access_lock, flags);
	write_sysreg_s(reg, L2CPUSRSELR_EL1);
	isb();
	val = read_sysreg_s(L2CPUSRDR_EL1);
	raw_spin_unlock_irqrestore(&l2_access_lock, flags);
	return val;
}
EXPORT_SYMBOL(kryo_l2_get_indirect_reg);

View File

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */
#ifndef __SOC_ARCH_QCOM_KRYO_L2_ACCESSORS_H
#define __SOC_ARCH_QCOM_KRYO_L2_ACCESSORS_H
/* Write @val to the indirect Kryo L2 register at address @reg. */
void kryo_l2_set_indirect_reg(u64 reg, u64 val);
/* Read and return the value of the indirect Kryo L2 register @reg. */
u64 kryo_l2_get_indirect_reg(u64 reg);
#endif