@@ -22,6 +22,9 @@ enum {
struct ucall {
uint64_t cmd;
uint64_t args[UCALL_MAX_ARGS];
+
+ /* Host virtual address of this struct. */
+ struct ucall *hva;
};
void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
@@ -30,11 +33,7 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu);
void ucall(uint64_t cmd, int nargs, ...);
uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
-
-static inline void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
-{
- ucall_arch_init(vm, mmio_gpa);
-}
+void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \
ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
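For context: the header change replaces the static inline ucall_init() with a real function (so the common code can set up the pool) and records each ucall's host virtual address in the struct itself; tests keep using the same macros. A minimal usage sketch follows (hypothetical test code, not part of the patch; GUEST_SYNC(), GUEST_DONE(), get_ucall() and TEST_FAIL() are the existing selftests helpers, VM/vCPU setup elided):

static void guest_code(void)
{
        /* Each ucall now grabs a slot from the shared pool instead of the guest stack. */
        GUEST_SYNC(1);
        GUEST_DONE();
}

static void handle_vcpu_exit(struct kvm_vcpu *vcpu)
{
        struct ucall uc;

        vcpu_run(vcpu);
        switch (get_ucall(vcpu, &uc)) {
        case UCALL_SYNC:
                /* uc.args[1] holds the stage passed to GUEST_SYNC(). */
                break;
        case UCALL_DONE:
                break;
        default:
                TEST_FAIL("Unexpected ucall %lu", uc.cmd);
        }
}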
@@ -32,12 +32,9 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_MMIO &&
run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {
- vm_vaddr_t gva;
-
- TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
+ TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t),
"Unexpected ucall exit mmio address access");
- memcpy(&gva, run->mmio.data, sizeof(gva));
- return addr_gva2hva(vcpu->vm, gva);
+ return (void *)(*((uint64_t *)run->mmio.data));
}
return NULL;
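With the guest handing over uc->hva, the 8-byte MMIO payload is already a usable host pointer, so the addr_gva2hva() translation (and the gva temporary) can go away. For reference, the guest side of the arm64 ucall looks roughly like the sketch below (reproduced from memory, not part of this diff):

/* GVA of the ucall MMIO hole, set up by ucall_arch_init() (assumed). */
static vm_vaddr_t *ucall_exit_mmio_addr;

void ucall_arch_do_ucall(vm_vaddr_t uc)
{
        /* A single 64-bit store: this value is what shows up in run->mmio.data. */
        WRITE_ONCE(*ucall_exit_mmio_addr, uc);
}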
@@ -55,7 +55,7 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT) {
switch (run->riscv_sbi.function_id) {
case KVM_RISCV_SELFTESTS_SBI_UCALL:
- return addr_gva2hva(vcpu->vm, run->riscv_sbi.args[0]);
+ return (void *)run->riscv_sbi.args[0];
case KVM_RISCV_SELFTESTS_SBI_UNEXP:
vcpu_dump(stderr, vcpu, 2);
TEST_ASSERT(0, "Unexpected trap taken by guest");
@@ -26,7 +26,7 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
(run->s390_sieic.ipb >> 16) == 0x501) {
int reg = run->s390_sieic.ipa & 0xf;
- return addr_gva2hva(vcpu->vm, run->s.regs.gprs[reg]);
+ return (void *)run->s.regs.gprs[reg];
}
return NULL;
}
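RISC-V and s390 follow the same pattern: the guest already passes the ucall pointer in a register (SBI arg0 and the DIAG 0x501 base register, respectively), so once that register holds uc->hva the host can use it directly and addr_gva2hva() becomes unnecessary. A rough sketch of the RISC-V guest side, assuming the sbi_ecall() helper from the selftests library (from memory, not part of this diff):

void ucall_arch_do_ucall(vm_vaddr_t uc)
{
        /* uc (now uc->hva) travels in a0 and surfaces as run->riscv_sbi.args[0]. */
        sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, KVM_RISCV_SELFTESTS_SBI_UCALL,
                  uc, 0, 0, 0, 0, 0);
}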
@@ -1,22 +1,86 @@
// SPDX-License-Identifier: GPL-2.0-only
#include "kvm_util.h"
+#include "linux/types.h"
+#include "linux/bitmap.h"
+#include "linux/atomic.h"
+
+struct ucall_header {
+ DECLARE_BITMAP(in_use, KVM_MAX_VCPUS);
+ struct ucall ucalls[KVM_MAX_VCPUS];
+};
+
+/*
+ * ucall_pool holds per-VM values (global data is duplicated by each VM), it
+ * must not be accessed from host code.
+ */
+static struct ucall_header *ucall_pool;
+
+void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+{
+ struct ucall_header *hdr;
+ struct ucall *uc;
+ vm_vaddr_t vaddr;
+ int i;
+
+ vaddr = vm_vaddr_alloc(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR);
+ hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr);
+ memset(hdr, 0, sizeof(*hdr));
+
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ uc = &hdr->ucalls[i];
+ uc->hva = uc;
+ }
+
+ write_guest_global(vm, ucall_pool, (struct ucall_header *)vaddr);
+
+ ucall_arch_init(vm, mmio_gpa);
+}
+
+static struct ucall *ucall_alloc(void)
+{
+ struct ucall *uc;
+ int i;
+
+ GUEST_ASSERT(ucall_pool);
+
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ if (!atomic_test_and_set_bit(i, ucall_pool->in_use)) {
+ uc = &ucall_pool->ucalls[i];
+ memset(uc->args, 0, sizeof(uc->args));
+ return uc;
+ }
+ }
+
+ GUEST_ASSERT(0);
+ return NULL;
+}
+
+static void ucall_free(struct ucall *uc)
+{
+ /* Beware, here be pointer arithmetic. */
+ clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use);
+}
void ucall(uint64_t cmd, int nargs, ...)
{
- struct ucall uc = {};
+ struct ucall *uc;
va_list va;
int i;
- WRITE_ONCE(uc.cmd, cmd);
+ uc = ucall_alloc();
+
+ WRITE_ONCE(uc->cmd, cmd);
nargs = min(nargs, UCALL_MAX_ARGS);
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
- WRITE_ONCE(uc.args[i], va_arg(va, uint64_t));
+ WRITE_ONCE(uc->args[i], va_arg(va, uint64_t));
va_end(va);
- ucall_arch_do_ucall((vm_vaddr_t)&uc);
+ ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+
+ ucall_free(uc);
}
uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
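Freeing the slot right after ucall_arch_do_ucall() is safe because the vCPU is still parked in its exit at that point: the host's get_ucall() copies the whole struct out of the pool while handling the exit, and the guest only reaches ucall_free() after the vCPU is resumed. For reference, the common get_ucall() (unchanged by this patch, body elided above) does roughly the following; sketch reproduced from memory:

uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
        struct ucall ucall;
        void *addr;

        if (!uc)
                uc = &ucall;

        /* With this patch, addr is a host pointer rather than a translated GVA. */
        addr = ucall_arch_get_ucall(vcpu);
        if (addr) {
                /* Snapshot the ucall before the guest can recycle the slot. */
                memcpy(uc, addr, sizeof(*uc));
                vcpu_run_complete_io(vcpu);
        } else {
                memset(uc, 0, sizeof(*uc));
        }

        return uc->cmd;
}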
@@ -26,7 +26,7 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
struct kvm_regs regs;
vcpu_regs_get(vcpu, &regs);
- return addr_gva2hva(vcpu->vm, regs.rdi);
+ return (void *)regs.rdi;
}
return NULL;
}
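Same idea on x86: RDI carries the value the guest passed to ucall_arch_do_ucall(), which is now uc->hva, so regs.rdi can be cast straight to a host pointer. The guest side is the existing PIO-based ucall, roughly as below (sketch from memory, not part of this diff; UCALL_PIO_PORT is the port the x86 ucall code already uses):

void ucall_arch_do_ucall(vm_vaddr_t uc)
{
        /*
         * The "D" constraint pins uc in RDI, which is exactly what
         * ucall_arch_get_ucall() reads back out of the vCPU registers
         * after the KVM_EXIT_IO exit.
         */
        asm volatile("in %[port], %%al"
                     : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax", "memory");
}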