@@ -556,16 +556,11 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
}
struct kvm_vm_stat {
- ulong remote_tlb_flush;
+ struct kvm_vm_stat_common common;
};
struct kvm_vcpu_stat {
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
+ struct kvm_vcpu_stat_common common;
u64 hvc_exit_stat;
u64 wfe_exit_stat;
u64 wfi_exit_stat;
@@ -29,18 +29,18 @@
#include "trace.h"
struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
+ VCPU_STAT_COM("halt_successful_poll", halt_successful_poll),
+ VCPU_STAT_COM("halt_attempted_poll", halt_attempted_poll),
+ VCPU_STAT_COM("halt_poll_invalid", halt_poll_invalid),
+ VCPU_STAT_COM("halt_wakeup", halt_wakeup),
VCPU_STAT("hvc_exit_stat", hvc_exit_stat),
VCPU_STAT("wfe_exit_stat", wfe_exit_stat),
VCPU_STAT("wfi_exit_stat", wfi_exit_stat),
VCPU_STAT("mmio_exit_user", mmio_exit_user),
VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
VCPU_STAT("exits", exits),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
+ VCPU_STAT_COM("halt_poll_success_ns", halt_poll_success_ns),
+ VCPU_STAT_COM("halt_poll_fail_ns", halt_poll_fail_ns),
{ NULL }
};
@@ -143,10 +143,11 @@ static inline bool kvm_is_error_hva(unsigned long addr)
}
struct kvm_vm_stat {
- ulong remote_tlb_flush;
+ struct kvm_vm_stat_common common;
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_common common;
u64 wait_exits;
u64 cache_exits;
u64 signal_exits;
@@ -178,12 +179,6 @@ struct kvm_vcpu_stat {
u64 vz_cpucfg_exits;
#endif
#endif
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
};
struct kvm_arch_memory_slot {
@@ -71,12 +71,12 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
#endif
#endif
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
+ VCPU_STAT_COM("halt_successful_poll", halt_successful_poll),
+ VCPU_STAT_COM("halt_attempted_poll", halt_attempted_poll),
+ VCPU_STAT_COM("halt_poll_invalid", halt_poll_invalid),
+ VCPU_STAT_COM("halt_wakeup", halt_wakeup),
+ VCPU_STAT_COM("halt_poll_success_ns", halt_poll_success_ns),
+ VCPU_STAT_COM("halt_poll_fail_ns", halt_poll_fail_ns),
{NULL}
};
@@ -80,12 +80,13 @@ struct kvmppc_book3s_shadow_vcpu;
struct kvm_nested_guest;
struct kvm_vm_stat {
- ulong remote_tlb_flush;
+ struct kvm_vm_stat_common common;
ulong num_2M_pages;
ulong num_1G_pages;
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_common common;
u64 sum_exits;
u64 mmio_exits;
u64 signal_exits;
@@ -101,14 +102,8 @@ struct kvm_vcpu_stat {
u64 emulated_inst_exits;
u64 dec_exits;
u64 ext_intr_exits;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
u64 halt_wait_ns;
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
u64 halt_successful_wait;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
u64 dbell_exits;
u64 gdbell_exits;
u64 ld;
@@ -47,14 +47,14 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT("dec", dec_exits),
VCPU_STAT("ext_intr", ext_intr_exits),
VCPU_STAT("queue_intr", queue_intr),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
+ VCPU_STAT_COM("halt_poll_success_ns", halt_poll_success_ns),
+ VCPU_STAT_COM("halt_poll_fail_ns", halt_poll_fail_ns),
VCPU_STAT("halt_wait_ns", halt_wait_ns),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
+ VCPU_STAT_COM("halt_successful_poll", halt_successful_poll),
+ VCPU_STAT_COM("halt_attempted_poll", halt_attempted_poll),
VCPU_STAT("halt_successful_wait", halt_successful_wait),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
+ VCPU_STAT_COM("halt_poll_invalid", halt_poll_invalid),
+ VCPU_STAT_COM("halt_wakeup", halt_wakeup),
VCPU_STAT("pf_storage", pf_storage),
VCPU_STAT("sp_storage", sp_storage),
VCPU_STAT("pf_instruc", pf_instruc),
@@ -236,7 +236,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
waitp = kvm_arch_vcpu_get_wait(vcpu);
if (rcuwait_wake_up(waitp))
- ++vcpu->stat.halt_wakeup;
+ ++vcpu->stat.common.halt_wakeup;
cpu = READ_ONCE(vcpu->arch.thread_cpu);
if (cpu >= 0 && kvmppc_ipi_thread(cpu))
@@ -3885,7 +3885,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
cur = start_poll = ktime_get();
if (vc->halt_poll_ns) {
ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
- ++vc->runner->stat.halt_attempted_poll;
+ ++vc->runner->stat.common.halt_attempted_poll;
vc->vcore_state = VCORE_POLLING;
spin_unlock(&vc->lock);
@@ -3902,7 +3902,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
vc->vcore_state = VCORE_INACTIVE;
if (!do_sleep) {
- ++vc->runner->stat.halt_successful_poll;
+ ++vc->runner->stat.common.halt_successful_poll;
goto out;
}
}
@@ -3914,7 +3914,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
do_sleep = 0;
/* If we polled, count this as a successful poll */
if (vc->halt_poll_ns)
- ++vc->runner->stat.halt_successful_poll;
+ ++vc->runner->stat.common.halt_successful_poll;
goto out;
}
@@ -3941,13 +3941,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
ktime_to_ns(cur) - ktime_to_ns(start_wait);
/* Attribute failed poll time */
if (vc->halt_poll_ns)
- vc->runner->stat.halt_poll_fail_ns +=
+ vc->runner->stat.common.halt_poll_fail_ns +=
ktime_to_ns(start_wait) -
ktime_to_ns(start_poll);
} else {
/* Attribute successful poll time */
if (vc->halt_poll_ns)
- vc->runner->stat.halt_poll_success_ns +=
+ vc->runner->stat.common.halt_poll_success_ns +=
ktime_to_ns(cur) -
ktime_to_ns(start_poll);
}
@@ -493,7 +493,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
if (!vcpu->arch.pending_exceptions) {
kvm_vcpu_block(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
- vcpu->stat.halt_wakeup++;
+ vcpu->stat.common.halt_wakeup++;
/* Unset POW bit after we woke up */
msr &= ~MSR_POW;
@@ -378,7 +378,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
kvm_vcpu_block(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
- vcpu->stat.halt_wakeup++;
+ vcpu->stat.common.halt_wakeup++;
return EMULATE_DONE;
case H_LOGICAL_CI_LOAD:
return kvmppc_h_pr_logical_ci_load(vcpu);
@@ -49,15 +49,15 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT("inst_emu", emulated_inst_exits),
VCPU_STAT("dec", dec_exits),
VCPU_STAT("ext_intr", ext_intr_exits),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
+ VCPU_STAT_COM("halt_successful_poll", halt_successful_poll),
+ VCPU_STAT_COM("halt_attempted_poll", halt_attempted_poll),
+ VCPU_STAT_COM("halt_poll_invalid", halt_poll_invalid),
+ VCPU_STAT_COM("halt_wakeup", halt_wakeup),
VCPU_STAT("doorbell", dbell_exits),
VCPU_STAT("guest doorbell", gdbell_exits),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- VM_STAT("remote_tlb_flush", remote_tlb_flush),
+ VCPU_STAT_COM("halt_poll_success_ns", halt_poll_success_ns),
+ VCPU_STAT_COM("halt_poll_fail_ns", halt_poll_fail_ns),
+ VM_STAT_COM("remote_tlb_flush", remote_tlb_flush),
{ NULL }
};
@@ -361,6 +361,7 @@ struct sie_page {
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_common common;
u64 exit_userspace;
u64 exit_null;
u64 exit_external_request;
@@ -370,13 +371,7 @@ struct kvm_vcpu_stat {
u64 exit_validity;
u64 exit_instruction;
u64 exit_pei;
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_invalid;
u64 halt_no_poll_steal;
- u64 halt_wakeup;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
u64 instruction_lctl;
u64 instruction_lctlg;
u64 instruction_stctl;
@@ -755,12 +750,12 @@ struct kvm_vcpu_arch {
};
struct kvm_vm_stat {
+ struct kvm_vm_stat_common common;
u64 inject_io;
u64 inject_float_mchk;
u64 inject_pfault_done;
u64 inject_service_signal;
u64 inject_virtio;
- u64 remote_tlb_flush;
};
struct kvm_arch_memory_slot {
@@ -72,13 +72,13 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT("exit_program_interruption", exit_program_interruption),
VCPU_STAT("exit_instr_and_program_int", exit_instr_and_program),
VCPU_STAT("exit_operation_exception", exit_operation_exception),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
+ VCPU_STAT_COM("halt_successful_poll", halt_successful_poll),
+ VCPU_STAT_COM("halt_attempted_poll", halt_attempted_poll),
+ VCPU_STAT_COM("halt_poll_invalid", halt_poll_invalid),
VCPU_STAT("halt_no_poll_steal", halt_no_poll_steal),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
+ VCPU_STAT_COM("halt_wakeup", halt_wakeup),
+ VCPU_STAT_COM("halt_poll_success_ns", halt_poll_success_ns),
+ VCPU_STAT_COM("halt_poll_fail_ns", halt_poll_fail_ns),
VCPU_STAT("instruction_lctlg", instruction_lctlg),
VCPU_STAT("instruction_lctl", instruction_lctl),
VCPU_STAT("instruction_stctl", instruction_stctl),
@@ -1127,6 +1127,7 @@ struct kvm_arch {
};
struct kvm_vm_stat {
+ struct kvm_vm_stat_common common;
ulong mmu_shadow_zapped;
ulong mmu_pte_write;
ulong mmu_pde_zapped;
@@ -1134,13 +1135,13 @@ struct kvm_vm_stat {
ulong mmu_recycled;
ulong mmu_cache_miss;
ulong mmu_unsync;
- ulong remote_tlb_flush;
ulong lpages;
ulong nx_lpage_splits;
ulong max_mmu_page_hash_collisions;
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_common common;
u64 pf_fixed;
u64 pf_guest;
u64 tlb_flush;
@@ -1154,10 +1155,6 @@ struct kvm_vcpu_stat {
u64 nmi_window_exits;
u64 l1d_flush;
u64 halt_exits;
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
u64 request_irq_exits;
u64 irq_exits;
u64 host_state_reload;
@@ -1168,8 +1165,6 @@ struct kvm_vcpu_stat {
u64 irq_injections;
u64 nmi_injections;
u64 req_event;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
u64 nested_run;
u64 directed_yield_attempted;
u64 directed_yield_successful;
@@ -229,10 +229,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT("irq_window", irq_window_exits),
VCPU_STAT("nmi_window", nmi_window_exits),
VCPU_STAT("halt_exits", halt_exits),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
+ VCPU_STAT_COM("halt_successful_poll", halt_successful_poll),
+ VCPU_STAT_COM("halt_attempted_poll", halt_attempted_poll),
+ VCPU_STAT_COM("halt_poll_invalid", halt_poll_invalid),
+ VCPU_STAT_COM("halt_wakeup", halt_wakeup),
VCPU_STAT("hypercalls", hypercalls),
VCPU_STAT("request_irq", request_irq_exits),
VCPU_STAT("irq_exits", irq_exits),
@@ -244,8 +244,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT("nmi_injections", nmi_injections),
VCPU_STAT("req_event", req_event),
VCPU_STAT("l1d_flush", l1d_flush),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
+ VCPU_STAT_COM("halt_poll_success_ns", halt_poll_success_ns),
+ VCPU_STAT_COM("halt_poll_fail_ns", halt_poll_fail_ns),
VCPU_STAT("nested_run", nested_run),
VCPU_STAT("directed_yield_attempted", directed_yield_attempted),
VCPU_STAT("directed_yield_successful", directed_yield_successful),
@@ -256,7 +256,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VM_STAT("mmu_recycled", mmu_recycled),
VM_STAT("mmu_cache_miss", mmu_cache_miss),
VM_STAT("mmu_unsync", mmu_unsync),
- VM_STAT("remote_tlb_flush", remote_tlb_flush),
+ VM_STAT_COM("remote_tlb_flush", remote_tlb_flush),
VM_STAT("largepages", lpages, .mode = 0444),
VM_STAT("nx_largepages_splitted", nx_lpage_splits, .mode = 0444),
VM_STAT("max_mmu_page_hash_collisions", max_mmu_page_hash_collisions),
@@ -1208,6 +1208,11 @@ struct kvm_stats_debugfs_item {
{ n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ }
#define VCPU_STAT(n, x, ...) \
{ n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ }
+#define VM_STAT_COM(n, x, ...) \
+ { n, offsetof(struct kvm, stat.common.x), KVM_STAT_VM, ## __VA_ARGS__ }
+#define VCPU_STAT_COM(n, x, ...) \
+ { n, offsetof(struct kvm_vcpu, stat.common.x), \
+ KVM_STAT_VCPU, ## __VA_ARGS__ }
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
@@ -76,5 +76,17 @@ struct kvm_mmu_memory_cache {
};
#endif
+struct kvm_vm_stat_common {
+ ulong remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat_common {
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
+ u64 halt_poll_success_ns;
+ u64 halt_poll_fail_ns;
+};
#endif /* __KVM_TYPES_H__ */
@@ -330,7 +330,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
*/
if (!kvm_arch_flush_remote_tlb(kvm)
|| kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
- ++kvm->stat.remote_tlb_flush;
+ ++kvm->stat.common.remote_tlb_flush;
cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
@@ -2990,9 +2990,9 @@ static inline void
update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
{
if (waited)
- vcpu->stat.halt_poll_fail_ns += poll_ns;
+ vcpu->stat.common.halt_poll_fail_ns += poll_ns;
else
- vcpu->stat.halt_poll_success_ns += poll_ns;
+ vcpu->stat.common.halt_poll_success_ns += poll_ns;
}
/*
@@ -3010,16 +3010,16 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
- ++vcpu->stat.halt_attempted_poll;
+ ++vcpu->stat.common.halt_attempted_poll;
do {
/*
* This sets KVM_REQ_UNHALT if an interrupt
* arrives.
*/
if (kvm_vcpu_check_block(vcpu) < 0) {
- ++vcpu->stat.halt_successful_poll;
+ ++vcpu->stat.common.halt_successful_poll;
if (!vcpu_valid_wakeup(vcpu))
- ++vcpu->stat.halt_poll_invalid;
+ ++vcpu->stat.common.halt_poll_invalid;
goto out;
}
poll_end = cur = ktime_get();
@@ -3076,7 +3076,7 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
waitp = kvm_arch_vcpu_get_wait(vcpu);
if (rcuwait_wake_up(waitp)) {
WRITE_ONCE(vcpu->ready, true);
- ++vcpu->stat.halt_wakeup;
+ ++vcpu->stat.common.halt_wakeup;
return true;
}
Put all common statistics in a separate structure to ease statistics
handling for the incoming new statistics API.

No functional change intended.

Signed-off-by: Jing Zhang <jingzhangos@google.com>
---
 arch/arm64/include/asm/kvm_host.h   |  9 ++-------
 arch/arm64/kvm/guest.c              | 12 ++++++------
 arch/mips/include/asm/kvm_host.h    |  9 ++-------
 arch/mips/kvm/mips.c                | 12 ++++++------
 arch/powerpc/include/asm/kvm_host.h |  9 ++-------
 arch/powerpc/kvm/book3s.c           | 12 ++++++------
 arch/powerpc/kvm/book3s_hv.c        | 12 ++++++------
 arch/powerpc/kvm/book3s_pr.c        |  2 +-
 arch/powerpc/kvm/book3s_pr_papr.c   |  2 +-
 arch/powerpc/kvm/booke.c            | 14 +++++++-------
 arch/s390/include/asm/kvm_host.h    |  9 ++-------
 arch/s390/kvm/kvm-s390.c            | 12 ++++++------
 arch/x86/include/asm/kvm_host.h     |  9 ++-------
 arch/x86/kvm/x86.c                  | 14 +++++++-------
 include/linux/kvm_host.h            |  5 +++++
 include/linux/kvm_types.h           | 12 ++++++++++++
 virt/kvm/kvm_main.c                 | 14 +++++++-------
 17 files changed, 80 insertions(+), 88 deletions(-)
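For readers who want the pattern outside the kernel tree: the patch embeds a shared stats struct as a `common` member of each architecture's stat struct, and the new `VM_STAT_COM`/`VCPU_STAT_COM` macros address those fields with `offsetof()` so the generic debugfs code can walk one table without knowing each architecture's layout. Below is a minimal standalone C sketch of that idea, compilable in userspace; every name in it (`vcpu_stat_common`, `vcpu_stat_x86`, `stat_item`, `STAT`, `STAT_COM`) is invented for illustration and is not the kernel's API.

```c
/*
 * Illustrative sketch only -- not kernel code. Shows the embedding +
 * offsetof() table pattern used by the patch: common counters live in a
 * shared struct, arch structs embed it as "common", and a single table
 * can address both kinds of fields by byte offset.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct vcpu_stat_common {               /* stand-in for kvm_vcpu_stat_common */
	uint64_t halt_attempted_poll;
	uint64_t halt_successful_poll;
	uint64_t halt_wakeup;
};

struct vcpu_stat_x86 {                  /* an "arch" struct embedding the common part */
	struct vcpu_stat_common common;
	uint64_t irq_exits;             /* arch-specific counter */
};

struct stat_item {                      /* stand-in for kvm_stats_debugfs_item */
	const char *name;
	size_t offset;
};

/* STAT() addresses arch fields, STAT_COM() the embedded common ones */
#define STAT(n, f)     { n, offsetof(struct vcpu_stat_x86, f) }
#define STAT_COM(n, f) { n, offsetof(struct vcpu_stat_x86, common.f) }

static const struct stat_item items[] = {
	STAT_COM("halt_attempted_poll",  halt_attempted_poll),
	STAT_COM("halt_successful_poll", halt_successful_poll),
	STAT_COM("halt_wakeup",          halt_wakeup),
	STAT("irq_exits",                irq_exits),
};

int main(void)
{
	struct vcpu_stat_x86 stat = { .common.halt_wakeup = 7, .irq_exits = 3 };

	/* Generic code: resolve each counter purely from its byte offset. */
	for (size_t i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
		const uint64_t *val =
			(const uint64_t *)((const char *)&stat + items[i].offset);
		printf("%-22s %llu\n", items[i].name,
		       (unsigned long long)*val);
	}
	return 0;
}
```

The point of the offset-based table, as in the patch's `VCPU_STAT_COM` macro, is that consumers updating or exporting the common counters (`vcpu->stat.common.halt_wakeup` and friends) need no per-architecture knowledge; only the arch-specific leftovers keep their own entries.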