@@ -15,6 +15,20 @@
typedef uint32_t compat_pfn_t;
typedef uint32_t compat_ulong_t;
+typedef uint32_t compat_ptr_t;
+
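+/*
+ * A compat handle holds a 32-bit guest pointer. The zero-length,
+ * packed array member records the pointee type for the compiler
+ * without changing the structure's size or layout.
+ */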
+#define __DEFINE_COMPAT_HANDLE(name, type) \
+ typedef struct { \
+ compat_ptr_t c; \
+ type *_[0] __attribute__((packed)); \
+ } __compat_handle_ ## name; \
+
+#define DEFINE_COMPAT_HANDLE(name) __DEFINE_COMPAT_HANDLE(name, name)
+#define COMPAT_HANDLE(name) __compat_handle_ ## name
+
+DEFINE_COMPAT_HANDLE(compat_pfn_t);
+DEFINE_COMPAT_HANDLE(compat_ulong_t);
+DEFINE_COMPAT_HANDLE(int);
struct compat_xen_add_to_physmap {
domid_t domid;
@@ -24,4 +38,14 @@ struct compat_xen_add_to_physmap {
compat_pfn_t gpfn;
};
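+/* 32-bit guest view of struct xen_add_to_physmap_batch */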
+struct compat_xen_add_to_physmap_batch {
+ domid_t domid;
+ uint16_t space;
+ uint16_t size;
+ uint16_t extra;
+ COMPAT_HANDLE(compat_ulong_t) idxs;
+ COMPAT_HANDLE(compat_pfn_t) gpfns;
+ COMPAT_HANDLE(int) errs;
+};
+
#endif /* QEMU_I386_XEN_COMPAT_H */
@@ -215,6 +215,72 @@ static int do_add_to_physmap(struct kvm_xen_exit *exit, X86CPU *cpu, uint64_t ar
return add_to_physmap_one(xatp.space, xatp.idx, xatp.gpfn);
}
+
+static int do_add_to_physmap_batch(struct kvm_xen_exit *exit, X86CPU *cpu,
+ uint64_t arg)
+{
+ struct xen_add_to_physmap_batch xatpb;
+ unsigned long idxs_gva, gpfns_gva, errs_gva;
+ CPUState *cs = CPU(cpu);
+ size_t op_sz;
+
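+ /* 32-bit guests pass the packed compat layout; widen it into the native struct */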
+ if (hypercall_compat32(exit->u.hcall.longmode)) {
+ struct compat_xen_add_to_physmap_batch xatpb32;
+
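+ /* Ensure the compiler has added no padding to the compat struct */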
+ qemu_build_assert(sizeof(struct compat_xen_add_to_physmap_batch) == 20);
+ if (kvm_copy_from_gva(cs, arg, &xatpb32, sizeof(xatpb32))) {
+ return -EFAULT;
+ }
+ xatpb.domid = xatpb32.domid;
+ xatpb.space = xatpb32.space;
+ xatpb.size = xatpb32.size;
+
+ idxs_gva = xatpb32.idxs.c;
+ gpfns_gva = xatpb32.gpfns.c;
+ errs_gva = xatpb32.errs.c;
+ op_sz = sizeof(uint32_t);
+ } else {
+ if (kvm_copy_from_gva(cs, arg, &xatpb, sizeof(xatpb))) {
+ return -EFAULT;
+ }
+ op_sz = sizeof(unsigned long);
+ idxs_gva = (unsigned long)xatpb.idxs.p;
+ gpfns_gva = (unsigned long)xatpb.gpfns.p;
+ errs_gva = (unsigned long)xatpb.errs.p;
+ }
+
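+ /* Only DOMID_SELF or the guest's own domain ID is accepted */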
+ if (xatpb.domid != DOMID_SELF && xatpb.domid != xen_domid) {
+ return -ESRCH;
+ }
+
+ /* Explicitly invalid for the batch op. Not that we implement it anyway. */
+ if (xatpb.space == XENMAPSPACE_gmfn_range) {
+ return -EINVAL;
+ }
+
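+ /* Walk the idxs/gpfns arrays, writing each op's result to the errs array */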
+ while (xatpb.size--) {
+ unsigned long idx = 0;
+ unsigned long gpfn = 0;
+ int err;
+
+ /* For 32-bit compat this only copies the low 32 bits of each */
+ if (kvm_copy_from_gva(cs, idxs_gva, &idx, op_sz) ||
+ kvm_copy_from_gva(cs, gpfns_gva, &gpfn, op_sz)) {
+ return -EFAULT;
+ }
+ idxs_gva += op_sz;
+ gpfns_gva += op_sz;
+
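+ /* A failure is reported through errs rather than aborting the whole batch */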
+ err = add_to_physmap_one(xatpb.space, idx, gpfn);
+
+ if (kvm_copy_to_gva(cs, errs_gva, &err, sizeof(err))) {
+ return -EFAULT;
+ }
+ errs_gva += sizeof(err);
+ }
+ return 0;
+}
+
static bool kvm_xen_hcall_memory_op(struct kvm_xen_exit *exit, X86CPU *cpu,
int cmd, uint64_t arg)
{
@@ -225,6 +291,10 @@ static bool kvm_xen_hcall_memory_op(struct kvm_xen_exit *exit, X86CPU *cpu,
err = do_add_to_physmap(exit, cpu, arg);
break;
+ case XENMEM_add_to_physmap_batch:
+ err = do_add_to_physmap_batch(exit, cpu, arg);
+ break;
+
default:
return false;
}