github.com/torvalds/linux.git
author     Fuad Tabba <tabba@google.com>   2022-05-10 12:57:07 +0300
committer  Marc Zyngier <maz@kernel.org>   2022-05-15 13:24:17 +0300
commit     3cb8a091a7016be0ee5420ff6c1928972e3ce23e (patch)
tree       adfd29ea90c0ed269558b09fdfd5f50efd80daf5 /arch
parent     b2d229d4ddb17db541098b83524d901257e93845 (diff)
KVM: arm64: Wrapper for getting pmu_events
Eases migrating away from using hyp data and simplifies the code.

No functional change intended.

Reviewed-by: Oliver Upton <oupton@google.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220510095710.148178-2-tabba@google.com
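The new accessor centralizes the this_cpu_ptr_hyp_sym(kvm_host_data) lookup and its NULL check, handing callers the embedded pmu_events struct directly. Below is a rough, self-contained sketch of the pattern in plain userspace C; the per-CPU lookup is replaced by a hypothetical global stand-in (get_host_ctx), since this_cpu_ptr_hyp_sym() only exists inside the kernel.

#include <stdio.h>

/* Simplified copies of the structures involved. */
struct kvm_pmu_events {
	unsigned int events_host;
	unsigned int events_guest;
};

struct kvm_host_data {
	struct kvm_pmu_events pmu_events;
};

/* Hypothetical stand-in for the per-CPU host context that the real
 * code obtains via this_cpu_ptr_hyp_sym(kvm_host_data). */
static struct kvm_host_data host_data;

static struct kvm_host_data *get_host_ctx(void)
{
	return &host_data;
}

/* The wrapper: callers receive the pmu_events sub-struct and no longer
 * need to know that it lives inside kvm_host_data. */
static struct kvm_pmu_events *kvm_get_pmu_events(void)
{
	struct kvm_host_data *ctx = get_host_ctx();

	if (!ctx)
		return NULL;

	return &ctx->pmu_events;
}

int main(void)
{
	struct kvm_pmu_events *pmu = kvm_get_pmu_events();

	if (!pmu)
		return 1;

	pmu->events_host  |= 0x1;	/* track an event while running the host */
	pmu->events_guest |= 0x2;	/* track an event while running the guest */

	printf("host=%#x guest=%#x\n", pmu->events_host, pmu->events_guest);
	return 0;
}

With every caller going through kvm_get_pmu_events(), a later switch away from hyp-mapped per-CPU data only has to change the body of this one helper rather than each call site, which is the migration the commit message alludes to.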
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/kvm/pmu.c  42
1 file changed, 26 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 03a6c1f4a09a..4bd38ff34221 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -25,21 +25,31 @@ static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
 	return (attr->exclude_host != attr->exclude_guest);
 }
 
+static struct kvm_pmu_events *kvm_get_pmu_events(void)
+{
+	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+
+	if (!ctx)
+		return NULL;
+
+	return &ctx->pmu_events;
+}
+
 /*
  * Add events to track that we may want to switch at guest entry/exit
  * time.
  */
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
 {
-	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+	struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-	if (!kvm_arm_support_pmu_v3() || !ctx || !kvm_pmu_switch_needed(attr))
+	if (!kvm_arm_support_pmu_v3() || !pmu || !kvm_pmu_switch_needed(attr))
 		return;
 
 	if (!attr->exclude_host)
-		ctx->pmu_events.events_host |= set;
+		pmu->events_host |= set;
 	if (!attr->exclude_guest)
-		ctx->pmu_events.events_guest |= set;
+		pmu->events_guest |= set;
 }
 
 /*
@@ -47,13 +57,13 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
  */
 void kvm_clr_pmu_events(u32 clr)
 {
-	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+	struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-	if (!kvm_arm_support_pmu_v3() || !ctx)
+	if (!kvm_arm_support_pmu_v3() || !pmu)
 		return;
 
-	ctx->pmu_events.events_host &= ~clr;
-	ctx->pmu_events.events_guest &= ~clr;
+	pmu->events_host &= ~clr;
+	pmu->events_guest &= ~clr;
 }
 
 #define PMEVTYPER_READ_CASE(idx)				\
@@ -169,16 +179,16 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
  */
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 {
-	struct kvm_host_data *host;
+	struct kvm_pmu_events *pmu;
 	u32 events_guest, events_host;
 
 	if (!kvm_arm_support_pmu_v3() || !has_vhe())
 		return;
 
 	preempt_disable();
-	host = this_cpu_ptr_hyp_sym(kvm_host_data);
-	events_guest = host->pmu_events.events_guest;
-	events_host = host->pmu_events.events_host;
+	pmu = kvm_get_pmu_events();
+	events_guest = pmu->events_guest;
+	events_host = pmu->events_host;
 
 	kvm_vcpu_pmu_enable_el0(events_guest);
 	kvm_vcpu_pmu_disable_el0(events_host);
@@ -190,15 +200,15 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
  */
 void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
 {
-	struct kvm_host_data *host;
+	struct kvm_pmu_events *pmu;
 	u32 events_guest, events_host;
 
 	if (!kvm_arm_support_pmu_v3() || !has_vhe())
 		return;
 
-	host = this_cpu_ptr_hyp_sym(kvm_host_data);
-	events_guest = host->pmu_events.events_guest;
-	events_host = host->pmu_events.events_host;
+	pmu = kvm_get_pmu_events();
+	events_guest = pmu->events_guest;
+	events_host = pmu->events_host;
 
 	kvm_vcpu_pmu_enable_el0(events_host);
 	kvm_vcpu_pmu_disable_el0(events_guest);