@@ -4174,7 +4174,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
do {
- if (kvm->arch.threads_indep && kvm_is_radix(kvm))
+ /*
+ * The early POWER9 chips that can't mix radix and HPT threads
+ * on the same core also need the workaround for the problem
+ * where the TLB would prefetch entries in the guest exit path
+ * for radix guests using the guest PIDR value and LPID 0.
+ * The workaround is in the old path (kvmppc_run_vcpu())
+ * but not the new path (kvmhv_run_single_vcpu()).
+ */
+ if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
+ !no_mixing_hpt_and_radix)
r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0,
vcpu->arch.vcore->lpcr);
else
@@ -5196,7 +5205,7 @@ static int kvmhv_enable_nested(struct kvm *kvm)
{
if (!nested)
return -EPERM;
- if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix)
return -ENODEV;
/* kvm == NULL means the caller is testing if the capability exists */
This disables the use of the streamlined entry path for radix guests on
early POWER9 chips that need the workaround added in commit a25bd72badfa
("powerpc/mm/radix: Workaround prefetch issue with KVM", 2017-07-24),
because the streamlined entry path does not include that workaround.
This also means that we can't do nested HV-KVM on those chips.

Since the chips that need that workaround are the same ones that can't
run both radix and HPT guests at the same time on different threads of
a core, we use the existing 'no_mixing_hpt_and_radix' variable that
identifies those chips to identify when we can't use the new guest
entry path, and when we can't do nested virtualization.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
---
 arch/powerpc/kvm/book3s_hv.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
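
For readers who want to see both gates side by side, here is a minimal
standalone sketch (ordinary userspace C, not kernel code) of the decision
logic the patch introduces. The struct, helper names and the main() driver
are hypothetical stand-ins; only the shape of the two conditions mirrors
the diff above.

/*
 * Sketch: the same per-chip flag gates both the streamlined guest entry
 * path and the nested-HV capability. Not the real KVM code.
 */
#include <stdbool.h>
#include <stdio.h>

/* Assumed to be set at module init on early POWER9 parts. */
static bool no_mixing_hpt_and_radix;

/* Hypothetical stand-in for the relevant bits of kvm->arch state. */
struct kvm_arch_sketch {
	bool threads_indep;	/* independent-threads mode */
	bool radix;		/* guest uses radix MMU */
};

static bool use_streamlined_entry(const struct kvm_arch_sketch *arch)
{
	/*
	 * Mirrors the new condition in kvmppc_vcpu_run_hv(): only take the
	 * single-vcpu path when the chip does not need the TLB prefetch
	 * workaround, which lives only in the old kvmppc_run_vcpu() path.
	 */
	return arch->threads_indep && arch->radix && !no_mixing_hpt_and_radix;
}

static int enable_nested(bool has_arch_300)
{
	/* Mirrors kvmhv_enable_nested(): nested HV needs the new path. */
	if (!has_arch_300 || no_mixing_hpt_and_radix)
		return -1;	/* stands in for -ENODEV */
	return 0;
}

int main(void)
{
	struct kvm_arch_sketch arch = { .threads_indep = true, .radix = true };

	no_mixing_hpt_and_radix = true;	/* pretend: early POWER9 */
	printf("streamlined entry: %d, nested: %d\n",
	       use_streamlined_entry(&arch), enable_nested(true));

	no_mixing_hpt_and_radix = false;	/* pretend: later chips */
	printf("streamlined entry: %d, nested: %d\n",
	       use_streamlined_entry(&arch), enable_nested(true));
	return 0;
}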