| Message ID | 4EDD8E73.8040505@ozlabs.org |
|---|---|
| State | New, archived |
Why is it getting moved out of generic code?

This is used to determine the maximum amount of vcpus supported by the
host for a single guest, and as far as I know KVM_CAP_NR_VCPUS and
KVM_CAP_MAX_VCPUS are not arch specific.

On Tue, 2011-12-06 at 14:39 +1100, Matt Evans wrote:
> Architectures can recommend/count/determine number of CPUs differently, so move
> this out of generic code.
>
> Signed-off-by: Matt Evans <matt@ozlabs.org>
> ---
>  tools/kvm/kvm.c     |   30 ------------------------------
>  tools/kvm/x86/kvm.c |   30 ++++++++++++++++++++++++++++++
>  2 files changed, 30 insertions(+), 30 deletions(-)
>
> diff --git a/tools/kvm/kvm.c b/tools/kvm/kvm.c
> index 7ce1640..e526483 100644
> --- a/tools/kvm/kvm.c
> +++ b/tools/kvm/kvm.c
> @@ -259,17 +259,6 @@ void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspac
>  		die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
>  }
>
> -int kvm__recommended_cpus(struct kvm *kvm)
> -{
> -	int ret;
> -
> -	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
> -	if (ret <= 0)
> -		die_perror("KVM_CAP_NR_VCPUS");
> -
> -	return ret;
> -}
> -
>  static void kvm__pid(int fd, u32 type, u32 len, u8 *msg)
>  {
>  	pid_t pid = getpid();
> @@ -282,25 +271,6 @@ static void kvm__pid(int fd, u32 type, u32 len, u8 *msg)
>  		pr_warning("Failed sending PID");
>  }
>
> -/*
> - * The following hack should be removed once 'x86: Raise the hard
> - * VCPU count limit' makes it's way into the mainline.
> - */
> -#ifndef KVM_CAP_MAX_VCPUS
> -#define KVM_CAP_MAX_VCPUS 66
> -#endif
> -
> -int kvm__max_cpus(struct kvm *kvm)
> -{
> -	int ret;
> -
> -	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
> -	if (ret <= 0)
> -		ret = kvm__recommended_cpus(kvm);
> -
> -	return ret;
> -}
> -
>  struct kvm *kvm__init(const char *kvm_dev, u64 ram_size, const char *name)
>  {
>  	struct kvm *kvm;
> diff --git a/tools/kvm/x86/kvm.c b/tools/kvm/x86/kvm.c
> index ac6c91e..75e4a52 100644
> --- a/tools/kvm/x86/kvm.c
> +++ b/tools/kvm/x86/kvm.c
> @@ -76,6 +76,36 @@ bool kvm__arch_cpu_supports_vm(void)
>  	return regs.ecx & (1 << feature);
>  }
>
> +int kvm__recommended_cpus(struct kvm *kvm)
> +{
> +	int ret;
> +
> +	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
> +	if (ret <= 0)
> +		die_perror("KVM_CAP_NR_VCPUS");
> +
> +	return ret;
> +}
> +
> +/*
> + * The following hack should be removed once 'x86: Raise the hard
> + * VCPU count limit' makes it's way into the mainline.
> + */
> +#ifndef KVM_CAP_MAX_VCPUS
> +#define KVM_CAP_MAX_VCPUS 66
> +#endif
> +
> +int kvm__max_cpus(struct kvm *kvm)
> +{
> +	int ret;
> +
> +	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
> +	if (ret <= 0)
> +		ret = kvm__recommended_cpus(kvm);
> +
> +	return ret;
> +}
> +
>  /*
>   * Allocating RAM size bigger than 4GB requires us to leave a gap
>   * in the RAM which is used for PCI MMIO, hotplug, and unconfigured
On 06/12/11 19:20, Sasha Levin wrote:
> Why is it getting moved out of generic code?
>
> This is used to determine the maximum amount of vcpus supported by the
> host for a single guest, and as far as I know KVM_CAP_NR_VCPUS and
> KVM_CAP_MAX_VCPUS are not arch specific.

I checked api.txt and you're right, it isn't arch-specific. I assumed it was,
because PPC KVM doesn't support it ;-) I've dropped this patch and in its place
implemented the api.txt suggestion of "if KVM_CAP_NR_VCPUS fails, use 4" instead
of die(); you'll see that when I repost.

This will have the effect of PPC being limited to 4 CPUs until the kernel
supports that CAP. (I'll see about this part too.)

Thanks,

Matt

>
> On Tue, 2011-12-06 at 14:39 +1100, Matt Evans wrote:
>> Architectures can recommend/count/determine number of CPUs differently, so move
>> this out of generic code.
>>
>> Signed-off-by: Matt Evans <matt@ozlabs.org>
>> ---
>>  tools/kvm/kvm.c     |   30 ------------------------------
>>  tools/kvm/x86/kvm.c |   30 ++++++++++++++++++++++++++++++
>>  2 files changed, 30 insertions(+), 30 deletions(-)
>>
>> diff --git a/tools/kvm/kvm.c b/tools/kvm/kvm.c
>> index 7ce1640..e526483 100644
>> --- a/tools/kvm/kvm.c
>> +++ b/tools/kvm/kvm.c
>> @@ -259,17 +259,6 @@ void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspac
>>  		die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
>>  }
>>
>> -int kvm__recommended_cpus(struct kvm *kvm)
>> -{
>> -	int ret;
>> -
>> -	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
>> -	if (ret <= 0)
>> -		die_perror("KVM_CAP_NR_VCPUS");
>> -
>> -	return ret;
>> -}
>> -
>>  static void kvm__pid(int fd, u32 type, u32 len, u8 *msg)
>>  {
>>  	pid_t pid = getpid();
>> @@ -282,25 +271,6 @@ static void kvm__pid(int fd, u32 type, u32 len, u8 *msg)
>>  		pr_warning("Failed sending PID");
>>  }
>>
>> -/*
>> - * The following hack should be removed once 'x86: Raise the hard
>> - * VCPU count limit' makes it's way into the mainline.
>> - */
>> -#ifndef KVM_CAP_MAX_VCPUS
>> -#define KVM_CAP_MAX_VCPUS 66
>> -#endif
>> -
>> -int kvm__max_cpus(struct kvm *kvm)
>> -{
>> -	int ret;
>> -
>> -	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
>> -	if (ret <= 0)
>> -		ret = kvm__recommended_cpus(kvm);
>> -
>> -	return ret;
>> -}
>> -
>>  struct kvm *kvm__init(const char *kvm_dev, u64 ram_size, const char *name)
>>  {
>>  	struct kvm *kvm;
>> diff --git a/tools/kvm/x86/kvm.c b/tools/kvm/x86/kvm.c
>> index ac6c91e..75e4a52 100644
>> --- a/tools/kvm/x86/kvm.c
>> +++ b/tools/kvm/x86/kvm.c
>> @@ -76,6 +76,36 @@ bool kvm__arch_cpu_supports_vm(void)
>>  	return regs.ecx & (1 << feature);
>>  }
>>
>> +int kvm__recommended_cpus(struct kvm *kvm)
>> +{
>> +	int ret;
>> +
>> +	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
>> +	if (ret <= 0)
>> +		die_perror("KVM_CAP_NR_VCPUS");
>> +
>> +	return ret;
>> +}
>> +
>> +/*
>> + * The following hack should be removed once 'x86: Raise the hard
>> + * VCPU count limit' makes it's way into the mainline.
>> + */
>> +#ifndef KVM_CAP_MAX_VCPUS
>> +#define KVM_CAP_MAX_VCPUS 66
>> +#endif
>> +
>> +int kvm__max_cpus(struct kvm *kvm)
>> +{
>> +	int ret;
>> +
>> +	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
>> +	if (ret <= 0)
>> +		ret = kvm__recommended_cpus(kvm);
>> +
>> +	return ret;
>> +}
>> +
>>  /*
>>   * Allocating RAM size bigger than 4GB requires us to leave a gap
>>   * in the RAM which is used for PCI MMIO, hotplug, and unconfigured
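For reference, a minimal sketch of the fallback Matt describes, assuming it stays in the generic kvm__recommended_cpus() and reuses the existing struct kvm / sys_fd plumbing from tools/kvm; the actual reposted patch may look different:

```c
int kvm__recommended_cpus(struct kvm *kvm)
{
	int ret;

	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
	if (ret <= 0) {
		/*
		 * api.txt suggests assuming 4 VCPUs per guest when
		 * KVM_CAP_NR_VCPUS is not reported, rather than dying here.
		 */
		ret = 4;
	}

	return ret;
}
```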
On Wed, 2011-12-07 at 17:17 +1100, Matt Evans wrote:
> On 06/12/11 19:20, Sasha Levin wrote:
> > Why is it getting moved out of generic code?
> >
> > This is used to determine the maximum amount of vcpus supported by the
> > host for a single guest, and as far as I know KVM_CAP_NR_VCPUS and
> > KVM_CAP_MAX_VCPUS are not arch specific.
>
> I checked api.txt and you're right, it isn't arch-specific. I assumed it was,
> because PPC KVM doesn't support it ;-) I've dropped this patch and in its place
> implemented the api.txt suggestion of "if KVM_CAP_NR_VCPUS fails, use 4" instead
> of die(); you'll see that when I repost.
>
> This will have the effect of PPC being limited to 4 CPUs until the kernel
> supports that CAP. (I'll see about this part too.)

I went to look at which limitation PPC places on amount of vcpus in
guest, and saw this in kvmppc_core_vcpu_create() in the book3s code:

	vcpu = kvmppc_core_vcpu_create(kvm, id);
	vcpu->arch.wqp = &vcpu->wq;
	if (!IS_ERR(vcpu))
		kvmppc_create_vcpu_debugfs(vcpu, id);

This is wrong, right? The VCPU is dereferenced before actually checking
that it's not an error.
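For clarity, a sketch of the ordering Sasha is pointing at, with the IS_ERR() check performed before the returned pointer is touched; the surrounding function and its exact error path are assumed here, not quoted from the kernel source:

```c
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return vcpu;	/* propagate the ERR_PTR before any dereference */

	vcpu->arch.wqp = &vcpu->wq;
	kvmppc_create_vcpu_debugfs(vcpu, id);
```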
diff --git a/tools/kvm/kvm.c b/tools/kvm/kvm.c
index 7ce1640..e526483 100644
--- a/tools/kvm/kvm.c
+++ b/tools/kvm/kvm.c
@@ -259,17 +259,6 @@ void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspac
 		die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
 }
 
-int kvm__recommended_cpus(struct kvm *kvm)
-{
-	int ret;
-
-	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
-	if (ret <= 0)
-		die_perror("KVM_CAP_NR_VCPUS");
-
-	return ret;
-}
-
 static void kvm__pid(int fd, u32 type, u32 len, u8 *msg)
 {
 	pid_t pid = getpid();
@@ -282,25 +271,6 @@ static void kvm__pid(int fd, u32 type, u32 len, u8 *msg)
 		pr_warning("Failed sending PID");
 }
 
-/*
- * The following hack should be removed once 'x86: Raise the hard
- * VCPU count limit' makes it's way into the mainline.
- */
-#ifndef KVM_CAP_MAX_VCPUS
-#define KVM_CAP_MAX_VCPUS 66
-#endif
-
-int kvm__max_cpus(struct kvm *kvm)
-{
-	int ret;
-
-	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
-	if (ret <= 0)
-		ret = kvm__recommended_cpus(kvm);
-
-	return ret;
-}
-
 struct kvm *kvm__init(const char *kvm_dev, u64 ram_size, const char *name)
 {
 	struct kvm *kvm;
diff --git a/tools/kvm/x86/kvm.c b/tools/kvm/x86/kvm.c
index ac6c91e..75e4a52 100644
--- a/tools/kvm/x86/kvm.c
+++ b/tools/kvm/x86/kvm.c
@@ -76,6 +76,36 @@ bool kvm__arch_cpu_supports_vm(void)
 	return regs.ecx & (1 << feature);
 }
 
+int kvm__recommended_cpus(struct kvm *kvm)
+{
+	int ret;
+
+	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
+	if (ret <= 0)
+		die_perror("KVM_CAP_NR_VCPUS");
+
+	return ret;
+}
+
+/*
+ * The following hack should be removed once 'x86: Raise the hard
+ * VCPU count limit' makes it's way into the mainline.
+ */
+#ifndef KVM_CAP_MAX_VCPUS
+#define KVM_CAP_MAX_VCPUS 66
+#endif
+
+int kvm__max_cpus(struct kvm *kvm)
+{
+	int ret;
+
+	ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
+	if (ret <= 0)
+		ret = kvm__recommended_cpus(kvm);
+
+	return ret;
+}
+
 /*
  * Allocating RAM size bigger than 4GB requires us to leave a gap
  * in the RAM which is used for PCI MMIO, hotplug, and unconfigured
Architectures can recommend/count/determine number of CPUs differently, so move
this out of generic code.

Signed-off-by: Matt Evans <matt@ozlabs.org>
---
 tools/kvm/kvm.c     |   30 ------------------------------
 tools/kvm/x86/kvm.c |   30 ++++++++++++++++++++++++++++++
 2 files changed, 30 insertions(+), 30 deletions(-)
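The two helpers being moved boil down to KVM_CHECK_EXTENSION queries on the /dev/kvm system fd. As a standalone illustration (not part of the patch, and assuming nothing beyond the documented KVM API), a minimal program querying both capabilities might look like this; api.txt documents that KVM_CHECK_EXTENSION returns 0 for a capability the kernel does not report:

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

#ifndef KVM_CAP_MAX_VCPUS
#define KVM_CAP_MAX_VCPUS 66	/* same fallback definition as in the patch */
#endif

int main(void)
{
	int sys_fd = open("/dev/kvm", O_RDWR);
	if (sys_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* Both queries go to the system fd, not a VM or VCPU fd. */
	int nr  = ioctl(sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
	int max = ioctl(sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);

	printf("KVM_CAP_NR_VCPUS  (recommended): %d\n", nr);
	printf("KVM_CAP_MAX_VCPUS (hard limit):  %d\n", max);

	close(sys_fd);
	return 0;
}
```

On kernels that do not report KVM_CAP_MAX_VCPUS the second query simply returns 0, which is why kvm__max_cpus() in the patch falls back to kvm__recommended_cpus().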