@@ -22,5 +22,4 @@ bool kvm_supports_guest_debug(void);
int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
void kvm_remove_all_breakpoints(CPUState *cpu);
-
#endif /* KVM_CPUS_H */
@@ -313,6 +313,31 @@ int kvm_create_device(KVMState *s, uint64_t type, bool test);
*/
bool kvm_device_supported(int vmfd, uint64_t type);
+/**
+ * kvm_create_vcpu - Gets a parked KVM vCPU or creates a KVM vCPU
+ * @cpu: QOM CPUState object for which a KVM vCPU has to be fetched/created.
+ *
+ * @returns: 0 on success, negative errno on failure.
+ */
+int kvm_create_vcpu(CPUState *cpu);
+
+/**
+ * kvm_park_vcpu - Park QEMU KVM vCPU context
+ * @cpu: QOM CPUState object for which QEMU KVM vCPU context has to be parked.
+ *
+ * @returns: none
+ */
+void kvm_park_vcpu(CPUState *cpu);
+
+/**
+ * kvm_unpark_vcpu - Unpark QEMU KVM vCPU context
+ * @s: KVM State
+ * @vcpu_id: Architecture vCPU ID of the parked vCPU
+ *
+ * @returns: the KVM fd of the unparked vCPU, or -ENOENT if no parked vCPU was found
+ */
+int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id);
+
/* Arch specific hooks */
extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
@@ -340,14 +340,71 @@ err:
return ret;
}
+void kvm_park_vcpu(CPUState *cpu)
+{
+ struct KVMParkedVcpu *vcpu;
+
+ trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
+
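+ /*
+  * KVM provides no ioctl to destroy a vCPU, so park its fd here so that
+  * a later creation of a vCPU with the same id can reuse it.
+  */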
+ vcpu = g_malloc0(sizeof(*vcpu));
+ vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
+ vcpu->kvm_fd = cpu->kvm_fd;
+ QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
+}
+
+int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id)
+{
+ struct KVMParkedVcpu *cpu;
+ int kvm_fd = -ENOENT;
+
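+ /* reclaim the fd of a previously parked vCPU with this id, if any */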
+ QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
+ if (cpu->vcpu_id == vcpu_id) {
+ QLIST_REMOVE(cpu, node);
+ kvm_fd = cpu->kvm_fd;
+ g_free(cpu);
+ break;
+ }
+ }
+
+ trace_kvm_unpark_vcpu(vcpu_id, kvm_fd > 0 ? "unparked" : "!found parked");
+
+ return kvm_fd;
+}
+
+int kvm_create_vcpu(CPUState *cpu)
+{
+ unsigned long vcpu_id = kvm_arch_vcpu_id(cpu);
+ KVMState *s = kvm_state;
+ int kvm_fd;
+
+ /* check if the KVM vCPU already exists but is parked */
+ kvm_fd = kvm_unpark_vcpu(s, vcpu_id);
+ if (kvm_fd < 0) {
+ /* vCPU not parked: create a new KVM vCPU */
+ kvm_fd = kvm_vm_ioctl(s, KVM_CREATE_VCPU, vcpu_id);
+ if (kvm_fd < 0) {
+ error_report("KVM_CREATE_VCPU IOCTL failed for vCPU %lu", vcpu_id);
+ return kvm_fd;
+ }
+ }
+
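+ /* bind the new or reclaimed KVM vCPU fd to this CPUState */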
+ cpu->kvm_fd = kvm_fd;
+ cpu->kvm_state = s;
+ cpu->vcpu_dirty = true;
+ cpu->dirty_pages = 0;
+ cpu->throttle_us_per_full = 0;
+
+ trace_kvm_create_vcpu(cpu->cpu_index, vcpu_id, kvm_fd);
+
+ return 0;
+}
+
static int do_kvm_destroy_vcpu(CPUState *cpu)
{
KVMState *s = kvm_state;
long mmap_size;
- struct KVMParkedVcpu *vcpu = NULL;
int ret = 0;
- trace_kvm_destroy_vcpu();
+ trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
ret = kvm_arch_destroy_vcpu(cpu);
if (ret < 0) {
@@ -373,10 +430,7 @@ static int do_kvm_destroy_vcpu(CPUState *cpu)
}
}
- vcpu = g_malloc0(sizeof(*vcpu));
- vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
- vcpu->kvm_fd = cpu->kvm_fd;
- QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
+ kvm_park_vcpu(cpu);
err:
return ret;
}
@@ -389,24 +443,6 @@ void kvm_destroy_vcpu(CPUState *cpu)
}
}
-static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
-{
- struct KVMParkedVcpu *cpu;
-
- QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
- if (cpu->vcpu_id == vcpu_id) {
- int kvm_fd;
-
- QLIST_REMOVE(cpu, node);
- kvm_fd = cpu->kvm_fd;
- g_free(cpu);
- return kvm_fd;
- }
- }
-
- return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
-}
-
int kvm_init_vcpu(CPUState *cpu, Error **errp)
{
KVMState *s = kvm_state;
@@ -415,19 +451,14 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp)
trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
- ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
+ ret = kvm_create_vcpu(cpu);
if (ret < 0) {
- error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
+ error_setg_errno(errp, -ret,
+ "kvm_init_vcpu: kvm_create_vcpu failed (%lu)",
kvm_arch_vcpu_id(cpu));
goto err;
}
- cpu->kvm_fd = ret;
- cpu->kvm_state = s;
- cpu->vcpu_dirty = true;
- cpu->dirty_pages = 0;
- cpu->throttle_us_per_full = 0;
-
mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size < 0) {
ret = mmap_size;
@@ -9,6 +9,10 @@ kvm_device_ioctl(int fd, int type, void *arg) "dev fd %d, type 0x%x, arg %p"
kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s"
kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s"
kvm_init_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
+kvm_create_vcpu(int cpu_index, unsigned long arch_cpu_id, int kvm_fd) "index: %d, id: %lu, kvm fd: %d"
+kvm_destroy_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
+kvm_park_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
+kvm_unpark_vcpu(unsigned long arch_cpu_id, const char *msg) "id: %lu %s"
kvm_irqchip_commit_routes(void) ""
kvm_irqchip_add_msi_route(char *name, int vector, int virq) "dev %s vector %d virq %d"
kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d"
@@ -25,7 +29,6 @@ kvm_dirty_ring_reaper(const char *s) "%s"
kvm_dirty_ring_reap(uint64_t count, int64_t t) "reaped %"PRIu64" pages (took %"PRIi64" us)"
kvm_dirty_ring_reaper_kick(const char *reason) "%s"
kvm_dirty_ring_flush(int finished) "%d"
-kvm_destroy_vcpu(void) ""
kvm_failed_get_vcpu_mmap_size(void) ""
kvm_cpu_exec(void) ""
kvm_interrupt_exit_request(void) ""