@@ -2997,10 +2997,197 @@ int kvm_arch_get_default_type(MachineState *ms)
return 0;
}
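+/*
+ * Helpers for kvm_arch_init(). They return 0 on success or a negative errno
+ * value on failure; kvm_vm_enable_userspace_msr() and
+ * kvm_vm_enable_energy_msrs() call exit() on fatal errors instead.
+ */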
+static int kvm_vm_enable_exception_payload(KVMState *s)
+{
+ int ret = 0;
+
+ has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
+ if (has_exception_payload) {
+ ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
+ if (ret < 0) {
+ error_report("kvm: Failed to enable exception payload cap: %s",
+ strerror(-ret));
+ }
+ }
+
+ return ret;
+}
+
+static int kvm_vm_enable_triple_fault_event(KVMState *s)
+{
+ int ret = 0;
+
+ has_triple_fault_event =
+ kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT);
+ if (has_triple_fault_event) {
+ ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true);
+ if (ret < 0) {
+ error_report("kvm: Failed to enable triple fault event cap: %s",
+ strerror(-ret));
+ }
+ }
+ return ret;
+}
+
+static int kvm_vm_set_identity_map_addr(KVMState *s, uint64_t *identity_base)
+{
+ /*
+ * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
+ * In order to use vm86 mode, an EPT identity map and a TSS are needed.
+ * Since these must be part of guest physical memory, we need to allocate
+ * them, both by setting their start addresses in the kernel and by
+ * creating a corresponding e820 entry. We need 4 pages before the BIOS,
+ * so this value allows up to 16M BIOSes.
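+ * (4 pages at 0xfeffc000 end at 0xff000000, which is 16 MiB below 4 GiB.)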
+ */
+ *identity_base = 0xfeffc000;
+ return kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, identity_base);
+}
+
+static int kvm_vm_set_nr_mmu_pages(KVMState *s)
+{
+ uint64_t shadow_mem;
+ int ret = 0;
+
+ shadow_mem = object_property_get_int(OBJECT(s),
+ "kvm-shadow-mem",
+ &error_abort);
+ if (shadow_mem != -1) {
+ shadow_mem /= 4096;
+ ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
+ }
+ return ret;
+}
+
+static int kvm_vm_set_tss_addr(KVMState *s, uint64_t tss_base)
+{
+ /* The caller places the TSS one page after the EPT identity map. */
+ return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, tss_base);
+}
+
+static int kvm_vm_enable_disable_exits(KVMState *s)
+{
+ int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
+/* Work around for kernel header with a typo. TODO: fix header and drop. */
+#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
+#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
+#endif
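+ /* Keep only the exit types QEMU knows how to disable. */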
+ if (disable_exits) {
+ disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
+ KVM_X86_DISABLE_EXITS_HLT |
+ KVM_X86_DISABLE_EXITS_PAUSE |
+ KVM_X86_DISABLE_EXITS_CSTATE);
+ }
+
+ return kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
+ disable_exits);
+}
+
+static int kvm_vm_enable_bus_lock_exit(KVMState *s)
+{
+ int ret;
+
+ ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT);
+ if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) {
+ error_report("kvm: bus lock detection unsupported");
+ return -ENOTSUP;
+ }
+ ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0,
+ KVM_BUS_LOCK_DETECTION_EXIT);
+ if (ret < 0) {
+ error_report("kvm: Failed to enable bus lock detection cap: %s",
+ strerror(-ret));
+ }
+
+ return ret;
+}
+
+static int kvm_vm_enable_notify_vmexit(KVMState *s)
+{
+ int ret = 0;
+
+ /* As before, silently skip when disabled or unsupported by the kernel. */
+ if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE &&
+ kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) {
+ uint64_t notify_window_flags =
+ ((uint64_t)s->notify_window << 32) |
+ KVM_X86_NOTIFY_VMEXIT_ENABLED |
+ KVM_X86_NOTIFY_VMEXIT_USER;
+ ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0,
+ notify_window_flags);
+ if (ret < 0) {
+ error_report("kvm: Failed to enable notify vmexit cap: %s",
+ strerror(-ret));
+ }
+ }
+ return ret;
+}
+
+static int kvm_vm_enable_userspace_msr(KVMState *s)
+{
+ int ret;
+
+ if (!kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) {
+ error_report("kvm: userspace MSR not supported");
+ return -ENOTSUP;
+ }
+
+ ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0,
+ KVM_MSR_EXIT_REASON_FILTER);
+ if (ret < 0) {
+ error_report("Could not enable user space MSRs: %s",
+ strerror(-ret));
+ exit(1);
+ }
+
+ if (!kvm_filter_msr(s, MSR_CORE_THREAD_COUNT,
+ kvm_rdmsr_core_thread_count, NULL)) {
+ error_report("Could not install MSR_CORE_THREAD_COUNT handler!");
+ exit(1);
+ }
+
+ return 0;
+}
+
+static void kvm_vm_enable_energy_msrs(KVMState *s)
+{
+ bool r;
+
+ if (s->msr_energy.enable == true) {
+ r = kvm_filter_msr(s, MSR_RAPL_POWER_UNIT,
+ kvm_rdmsr_rapl_power_unit, NULL);
+ if (!r) {
+ error_report("Could not install MSR_RAPL_POWER_UNIT handler");
+ exit(1);
+ }
+
+ r = kvm_filter_msr(s, MSR_PKG_POWER_LIMIT,
+ kvm_rdmsr_pkg_power_limit, NULL);
+ if (!r) {
+ error_report("Could not install MSR_PKG_POWER_LIMIT handler");
+ exit(1);
+ }
+
+ r = kvm_filter_msr(s, MSR_PKG_POWER_INFO,
+ kvm_rdmsr_pkg_power_info, NULL);
+ if (!r) {
+ error_report("Could not install MSR_PKG_POWER_INFO handler");
+ exit(1);
+ }
+
+ r = kvm_filter_msr(s, MSR_PKG_ENERGY_STATUS,
+ kvm_rdmsr_pkg_energy_status, NULL);
+ if (!r) {
+ error_report("Could not install MSR_PKG_ENERGY_STATUS handler");
+ exit(1);
+ }
+ }
+}
+
int kvm_arch_init(MachineState *ms, KVMState *s)
{
uint64_t identity_base = 0xfffbc000;
- uint64_t shadow_mem;
int ret;
struct utsname utsname;
Error *local_err = NULL;
@@ -3030,24 +3217,14 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
- has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
- if (has_exception_payload) {
- ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
- if (ret < 0) {
- error_report("kvm: Failed to enable exception payload cap: %s",
- strerror(-ret));
- return ret;
- }
+ ret = kvm_vm_enable_exception_payload(s);
+ if (ret < 0) {
+ return ret;
}
- has_triple_fault_event = kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT);
- if (has_triple_fault_event) {
- ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true);
- if (ret < 0) {
- error_report("kvm: Failed to enable triple fault event cap: %s",
- strerror(-ret));
- return ret;
- }
+ ret = kvm_vm_enable_triple_fault_event(s);
+ if (ret < 0) {
+ return ret;
}
if (s->xen_version) {
@@ -3078,22 +3255,12 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
uname(&utsname);
lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
- /*
- * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
- * In order to use vm86 mode, an EPT identity map and a TSS are needed.
- * Since these must be part of guest physical memory, we need to allocate
- * them, both by setting their start addresses in the kernel and by
- * creating a corresponding e820 entry. We need 4 pages before the BIOS,
- * so this value allows up to 16M BIOSes.
- */
- identity_base = 0xfeffc000;
- ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
+ ret = kvm_vm_set_identity_map_addr(s, &identity_base);
if (ret < 0) {
return ret;
}
- /* Set TSS base one page after EPT identity map. */
- ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
+ ret = kvm_vm_set_tss_addr(s, identity_base + 0x1000);
if (ret < 0) {
return ret;
}
@@ -3101,13 +3268,9 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
/* Tell fw_cfg to notify the BIOS to reserve the range. */
e820_add_entry(identity_base, 0x4000, E820_RESERVED);
- shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort);
- if (shadow_mem != -1) {
- shadow_mem /= 4096;
- ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
- if (ret < 0) {
- return ret;
- }
+ ret = kvm_vm_set_nr_mmu_pages(s);
+ if (ret < 0) {
+ return ret;
}
if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
@@ -3118,20 +3281,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
}
if (enable_cpu_pm) {
- int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
-/* Work around for kernel header with a typo. TODO: fix header and drop. */
-#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
-#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
-#endif
- if (disable_exits) {
- disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
- KVM_X86_DISABLE_EXITS_HLT |
- KVM_X86_DISABLE_EXITS_PAUSE |
- KVM_X86_DISABLE_EXITS_CSTATE);
- }
-
- ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
- disable_exits);
+ ret = kvm_vm_enable_disable_exits(s);
if (ret < 0) {
error_report("kvm: guest stopping CPU not supported: %s",
strerror(-ret));
@@ -3142,16 +3292,8 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
X86MachineState *x86ms = X86_MACHINE(ms);
if (x86ms->bus_lock_ratelimit > 0) {
- ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT);
- if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) {
- error_report("kvm: bus lock detection unsupported");
- return -ENOTSUP;
- }
- ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0,
- KVM_BUS_LOCK_DETECTION_EXIT);
+ ret = kvm_vm_enable_bus_lock_exit(s);
if (ret < 0) {
- error_report("kvm: Failed to enable bus lock detection cap: %s",
- strerror(-ret));
return ret;
}
ratelimit_init(&bus_lock_ratelimit_ctrl);
@@ -3160,80 +3302,23 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
}
}
- if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE &&
- kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) {
- uint64_t notify_window_flags =
- ((uint64_t)s->notify_window << 32) |
- KVM_X86_NOTIFY_VMEXIT_ENABLED |
- KVM_X86_NOTIFY_VMEXIT_USER;
- ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0,
- notify_window_flags);
- if (ret < 0) {
- error_report("kvm: Failed to enable notify vmexit cap: %s",
- strerror(-ret));
- return ret;
- }
+ ret = kvm_vm_enable_notify_vmexit(s);
+ if (ret < 0) {
+ return ret;
}
- if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) {
- bool r;
- ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0,
- KVM_MSR_EXIT_REASON_FILTER);
- if (ret) {
- error_report("Could not enable user space MSRs: %s",
- strerror(-ret));
- exit(1);
- }
-
- r = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT,
- kvm_rdmsr_core_thread_count, NULL);
- if (!r) {
- error_report("Could not install MSR_CORE_THREAD_COUNT handler: %s",
- strerror(-ret));
- exit(1);
+ if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) {
+ ret = kvm_vm_enable_userspace_msr(s);
+ if (ret < 0) {
+ return ret;
}
if (s->msr_energy.enable == true) {
- r = kvm_filter_msr(s, MSR_RAPL_POWER_UNIT,
- kvm_rdmsr_rapl_power_unit, NULL);
- if (!r) {
- error_report("Could not install MSR_RAPL_POWER_UNIT \
- handler: %s",
- strerror(-ret));
- exit(1);
- }
-
- r = kvm_filter_msr(s, MSR_PKG_POWER_LIMIT,
- kvm_rdmsr_pkg_power_limit, NULL);
- if (!r) {
- error_report("Could not install MSR_PKG_POWER_LIMIT \
- handler: %s",
- strerror(-ret));
- exit(1);
- }
-
- r = kvm_filter_msr(s, MSR_PKG_POWER_INFO,
- kvm_rdmsr_pkg_power_info, NULL);
- if (!r) {
- error_report("Could not install MSR_PKG_POWER_INFO \
- handler: %s",
- strerror(-ret));
+ kvm_vm_enable_energy_msrs(s);
+ if (kvm_msr_energy_thread_init(s, ms)) {
+ error_report("kvm : error RAPL feature requirement not met");
exit(1);
}
- r = kvm_filter_msr(s, MSR_PKG_ENERGY_STATUS,
- kvm_rdmsr_pkg_energy_status, NULL);
- if (!r) {
- error_report("Could not install MSR_PKG_ENERGY_STATUS \
- handler: %s",
- strerror(-ret));
- exit(1);
- }
- r = kvm_msr_energy_thread_init(s, ms);
- if (r) {
- error_report("kvm : error RAPL feature requirement not meet");
- exit(1);
- }
-
}
}
kvm_arch_init() enables a lot of VM capabilities. Refactor them into separate smaller functions. The energy MSR related operations are also moved into their own function. There should be no functional impact.

Signed-off-by: Ani Sinha <anisinha@redhat.com>
---
 target/i386/kvm/kvm.c | 337 ++++++++++++++++++++++++++----------------
 1 file changed, 211 insertions(+), 126 deletions(-)
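For reviewers skimming the series: the new helpers all share one shape, probe a capability, enable it when present, and hand any negative errno back to kvm_arch_init() so it can fail early. Below is a minimal stand-alone sketch of that pattern. check_cap() and enable_cap() are hypothetical stand-ins, not the real kvm_check_extension()/kvm_vm_enable_cap() APIs.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for kvm_check_extension()/kvm_vm_enable_cap(). */
static int check_cap(int cap)
{
    return cap == 1; /* pretend only capability 1 is supported */
}

static int enable_cap(int cap)
{
    return cap == 1 ? 0 : -EINVAL;
}

/* Same shape as the kvm_vm_enable_*() helpers: 0 or a negative errno. */
static int enable_optional_cap(int cap, const char *name)
{
    int ret = 0;

    if (check_cap(cap)) {
        ret = enable_cap(cap);
        if (ret < 0) {
            fprintf(stderr, "kvm: Failed to enable %s: %s\n",
                    name, strerror(-ret));
        }
    }
    return ret;
}

int main(void)
{
    /* The caller only propagates errors, as kvm_arch_init() now does. */
    int ret = enable_optional_cap(1, "example capability");

    return ret < 0 ? 1 : 0;
}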