| Message ID | 20210505154642.178702-5-bharata@linux.ibm.com |
|---|---|
| State | New |
| Series | Support for H_RPT_INVALIDATE in PowerPC KVM |
Excerpts from Bharata B Rao's message of May 6, 2021 1:46 am:
> Enable support for process-scoped invalidations from nested
> guests and partition-scoped invalidations for nested guests.
>
> Process-scoped invalidations for any level of nested guests
> are handled by implementing H_RPT_INVALIDATE handler in the
> nested guest exit path in L0.
>
> Partition-scoped invalidation requests are forwarded to the
> right nested guest, handled there and passed down to L0
> for eventual handling.
>
> Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> 	[Nested guest partition-scoped invalidation changes]
> ---
>  .../include/asm/book3s/64/tlbflush-radix.h |   4 +
>  arch/powerpc/include/asm/kvm_book3s.h      |   3 +
>  arch/powerpc/kvm/book3s_hv.c               |  66 ++++++++++-
>  arch/powerpc/kvm/book3s_hv_nested.c        | 104 ++++++++++++++++++
>  arch/powerpc/mm/book3s64/radix_tlb.c       |   4 -
>  5 files changed, 174 insertions(+), 7 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
> index 8b33601cdb9d..a46fd37ad552 100644
> --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
> +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
> @@ -4,6 +4,10 @@
>  
>  #include <asm/hvcall.h>
>  
> +#define RIC_FLUSH_TLB 0
> +#define RIC_FLUSH_PWC 1
> +#define RIC_FLUSH_ALL 2
> +
>  struct vm_area_struct;
>  struct mm_struct;
>  struct mmu_gather;
> diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
> index a6e9a5585e61..fdf54741c58c 100644
> --- a/arch/powerpc/include/asm/kvm_book3s.h
> +++ b/arch/powerpc/include/asm/kvm_book3s.h
> @@ -307,6 +307,9 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
>  void kvmhv_release_all_nested(struct kvm *kvm);
>  long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
>  long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
> +long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
> +			     unsigned long type, unsigned long pg_sizes,
> +			     unsigned long start, unsigned long end);
>  int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
>  			  u64 time_limit, unsigned long lpcr);
>  void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index bcf34246bbe9..a2e7fbec796a 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -925,6 +925,41 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
>  	return yield_count;
>  }
>  
> +/*
> + * H_RPT_INVALIDATE hcall handler for nested guests.
> + *
> + * Handles only nested process-scoped invalidation requests in L0.
> + */
> +static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu)
> +{
> +	unsigned long type = kvmppc_get_gpr(vcpu, 6);
> +	unsigned long pid, pg_sizes, start, end, psize;
> +	struct mmu_psize_def *def;
> +
> +	/*
> +	 * The partition-scoped invalidations aren't handled here in L0.
> +	 */
> +	if (type & H_RPTI_TYPE_NESTED)
> +		return RESUME_HOST;
> +
> +	pid = kvmppc_get_gpr(vcpu, 4);
> +	pg_sizes = kvmppc_get_gpr(vcpu, 7);
> +	start = kvmppc_get_gpr(vcpu, 8);
> +	end = kvmppc_get_gpr(vcpu, 9);
> +
> +	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
> +		def = &mmu_psize_defs[psize];
> +		if (pg_sizes & def->h_rpt_pgsize)
> +			do_h_rpt_invalidate_prt(pid,
> +					vcpu->arch.nested->shadow_lpid,
> +					type, (1UL << def->shift),
> +					psize, start, end);
> +	}
> +
> +	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
> +	return RESUME_GUEST;
> +}
> +
>  static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
>  				    unsigned long id, unsigned long target,
>  				    unsigned long type, unsigned long pg_sizes,
> @@ -941,10 +976,18 @@ static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
>  
>  	/*
>  	 * Partition-scoped invalidation for nested guests.
> -	 * Not yet supported
>  	 */
> -	if (type & H_RPTI_TYPE_NESTED)
> -		return H_P3;
> +	if (type & H_RPTI_TYPE_NESTED) {
> +		if (!nesting_enabled(vcpu->kvm))
> +			return H_FUNCTION;
> +
> +		/* Support only cores as target */
> +		if (target != H_RPTI_TARGET_CMMU)
> +			return H_P2;
> +
> +		return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes,
> +					       start, end);
> +	}
>  
>  	/*
>  	 * Process-scoped invalidation for L1 guests.
> @@ -1639,6 +1682,23 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
>  		if (!xics_on_xive())
>  			kvmppc_xics_rm_complete(vcpu, 0);
>  		break;
> +	case BOOK3S_INTERRUPT_SYSCALL:
> +	{
> +		unsigned long req = kvmppc_get_gpr(vcpu, 3);
> +
> +		/*
> +		 * The H_RPT_INVALIDATE hcalls issued by nested
> +		 * guests for process-scoped invalidations when
> +		 * GTSE=0, are handled here in L0.
> +		 */
> +		if (req == H_RPT_INVALIDATE) {
> +			r = kvmppc_nested_h_rpt_invalidate(vcpu);
> +			break;
> +		}
> +
> +		r = RESUME_HOST;
> +		break;
> +	}
>  	default:
>  		r = RESUME_HOST;
>  		break;
> diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
> index 60724f674421..91f10290130d 100644
> --- a/arch/powerpc/kvm/book3s_hv_nested.c
> +++ b/arch/powerpc/kvm/book3s_hv_nested.c
> @@ -1214,6 +1214,110 @@ long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
>  	return H_SUCCESS;
>  }
>  
> +static long do_tlb_invalidate_nested_tlb(struct kvm_vcpu *vcpu,
> +					 unsigned long lpid,
> +					 unsigned long page_size,
> +					 unsigned long ap,
> +					 unsigned long start,
> +					 unsigned long end)
> +{
> +	unsigned long addr = start;
> +	int ret;
> +
> +	do {
> +		ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap,
> +						   get_epn(addr));
> +		if (ret)
> +			return ret;
> +		addr += page_size;
> +	} while (addr < end);
> +
> +	return ret;
> +}

Similar comments for single page thresholds, and multiple page sizes.

> +
> +static long do_tlb_invalidate_nested_all(struct kvm_vcpu *vcpu,
> +					 unsigned long lpid, unsigned long ric)
> +{
> +	struct kvm *kvm = vcpu->kvm;
> +	struct kvm_nested_guest *gp;
> +
> +	gp = kvmhv_get_nested(kvm, lpid, false);
> +	if (gp) {
> +		kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
> +		kvmhv_put_nested(gp);
> +	}
> +	return H_SUCCESS;
> +}
> +
> +/*
> + * Performs partition-scoped invalidations for nested guests
> + * as part of H_RPT_INVALIDATE hcall.
> + */
> +long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
> +			     unsigned long type, unsigned long pg_sizes,
> +			     unsigned long start, unsigned long end)
> +{
> +	struct kvm_nested_guest *gp;
> +	long ret;
> +	unsigned long psize, ap;
> +
> +	/*
> +	 * If L2 lpid isn't valid, we need to return H_PARAMETER.
> +	 *
> +	 * However, nested KVM issues a L2 lpid flush call when creating
> +	 * partition table entries for L2. This happens even before the
> +	 * corresponding shadow lpid is created in HV which happens in
> +	 * H_ENTER_NESTED call. Since we can't differentiate this case from
> +	 * the invalid case, we ignore such flush requests and return success.
> +	 */
> +	gp = kvmhv_find_nested(vcpu->kvm, lpid);
> +	if (!gp)
> +		return H_SUCCESS;
> +
> +	/*
> +	 * A flush all request can be handled by a full lpid flush only.
> +	 */
> +	if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL)
> +		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);
> +
> +#if 0
> +	/*
> +	 * We don't need to handle a PWC flush like process table here,
> +	 * because intermediate partition scoped table in nested guest doesn't
> +	 * really have PWC. Only level we have PWC is in L0 and for nested
> +	 * invalidate at L0 we always do kvm_flush_lpid() which does
> +	 * radix__flush_all_lpid(). For range invalidate at any level, we
> +	 * are not removing the higher level page tables and hence there is
> +	 * no PWC invalidate needed.
> +	 */
> +	if (type & H_RPTI_TYPE_PWC) {
> +		ret = do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_PWC);
> +		if (ret)
> +			return H_P4;
> +	}
> +#endif

I think removing this #if 0 and the unnecessary code is fine, just a bit
more explanation in the comment would help. And "doesn't really" implies
it sort of might a little bit, I think what you want is "really doesn't"
:)

As I understand it, the L0 does not cache any intermediate levels of the
nested guest's partition scope at all. Only the nested HV's pte entries
are copied into the shadow page table, so we only care if the PTEs are
changed, and the PWCs that the processor creates for the shadow page
table are managed by the kvmppc_unmap_pte() etc functions... I think?

Thanks,
Nick

> +
> +	if (start == 0 && end == -1)
> +		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB);
> +
> +	if (type & H_RPTI_TYPE_TLB) {
> +		struct mmu_psize_def *def;
> +
> +		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
> +			def = &mmu_psize_defs[psize];
> +			if (!(pg_sizes & def->h_rpt_pgsize))
> +				continue;
> +
> +			ret = do_tlb_invalidate_nested_tlb(vcpu, lpid,
> +						(1UL << def->shift),
> +						ap, start, end);
> +			if (ret)
> +				return H_P4;
> +		}
> +	}
> +	return H_SUCCESS;
> +}
> +
>  /* Used to convert a nested guest real address to a L1 guest real address */
>  static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
>  				       struct kvm_nested_guest *gp,
> diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
> index 65aad9ce3557..613198c41006 100644
> --- a/arch/powerpc/mm/book3s64/radix_tlb.c
> +++ b/arch/powerpc/mm/book3s64/radix_tlb.c
> @@ -20,10 +20,6 @@
>  
>  #include "internal.h"
>  
> -#define RIC_FLUSH_TLB 0
> -#define RIC_FLUSH_PWC 1
> -#define RIC_FLUSH_ALL 2
> -
>  /*
>   * tlbiel instruction for radix, set invalidation
>   * i.e., r=1 and is=01 or is=10 or is=11
> -- 
> 2.26.2
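[Editor's note: Nick's "single page thresholds" remark points at do_tlb_invalidate_nested_tlb() above, which emulates one tlbie per page with no upper bound, so a large range degenerates into a very long loop. Below is a minimal sketch of the kind of fallback being suggested, reusing the patch's own helpers. The wrapper function and the ceiling value are illustrative assumptions, not code from this series; the value 33 mirrors the tlb_single_page_flush_ceiling default in radix_tlb.c.]

/*
 * Hypothetical sketch: if a range invalidation would need too many
 * single-page tlbie emulations, fall back to a full-LPID TLB flush.
 */
static long do_tlb_invalidate_nested_range(struct kvm_vcpu *vcpu,
					   unsigned long lpid,
					   unsigned long shift,
					   unsigned long ap,
					   unsigned long start,
					   unsigned long end)
{
	unsigned long npages = (end - start) >> shift;

	/* Assumed ceiling, cf. tlb_single_page_flush_ceiling (33) */
	if (npages > 33)
		return do_tlb_invalidate_nested_all(vcpu, lpid,
						    RIC_FLUSH_TLB);

	return do_tlb_invalidate_nested_tlb(vcpu, lpid, 1UL << shift,
					    ap, start, end);
}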
Nicholas Piggin <npiggin@gmail.com> writes:

...

>> + */
>> +long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
>> +			     unsigned long type, unsigned long pg_sizes,
>> +			     unsigned long start, unsigned long end)
>> +{
>> +	struct kvm_nested_guest *gp;
>> +	long ret;
>> +	unsigned long psize, ap;
>> +
>> +	/*
>> +	 * If L2 lpid isn't valid, we need to return H_PARAMETER.
>> +	 *
>> +	 * However, nested KVM issues a L2 lpid flush call when creating
>> +	 * partition table entries for L2. This happens even before the
>> +	 * corresponding shadow lpid is created in HV which happens in
>> +	 * H_ENTER_NESTED call. Since we can't differentiate this case from
>> +	 * the invalid case, we ignore such flush requests and return success.
>> +	 */
>> +	gp = kvmhv_find_nested(vcpu->kvm, lpid);
>> +	if (!gp)
>> +		return H_SUCCESS;
>> +
>> +	/*
>> +	 * A flush all request can be handled by a full lpid flush only.
>> +	 */
>> +	if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL)
>> +		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);
>> +
>> +#if 0
>> +	/*
>> +	 * We don't need to handle a PWC flush like process table here,
>> +	 * because intermediate partition scoped table in nested guest doesn't
>> +	 * really have PWC. Only level we have PWC is in L0 and for nested
>> +	 * invalidate at L0 we always do kvm_flush_lpid() which does
>> +	 * radix__flush_all_lpid(). For range invalidate at any level, we
>> +	 * are not removing the higher level page tables and hence there is
>> +	 * no PWC invalidate needed.
>> +	 */
>> +	if (type & H_RPTI_TYPE_PWC) {
>> +		ret = do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_PWC);
>> +		if (ret)
>> +			return H_P4;
>> +	}
>> +#endif
>
> I think removing this #if 0 and the unnecessary code is fine, just a bit
> more explanation in the comment would help. And "doesn't really" implies
> it sort of might a little bit, I think what you want is "really doesn't"
> :)

yes.

>
> As I understand it, the L0 does not cache any intermediate levels of the
> nested guest's partition scope at all. Only the nested HV's pte entries
> are copied into the shadow page table, so we only care if the PTEs are
> changed, and the PWCs that the processor creates for the shadow page
> table are managed by the kvmppc_unmap_pte() etc functions... I think?

That is correct. The reason I added the comment there was to clarify
why the PWC type is not handled for partition-scoped invalidation the
way it is for process-scoped invalidation. The code fragment was left
as an indication of what should happen theoretically.

All higher-level guests (L1, L2, etc.) have partition tables that are
not really used for the hardware page table walk. The H_RPT_INVALIDATE
hcall is used as a hint to free those page table entries. L0, on
receiving the hcall, forwards it to the higher-level guest, which,
after invalidating its shadow PTEs, issues a further H_RPT_INVALIDATE
hcall to clear the partition-scoped entries of the current guest.

If it is a range TLB flush, we just clear the shadow PTE; higher-level
page tables are not modified and hence no PWC flush is required. If it
is a full lpid flush, because of RIC=1/2 or because the range is
0 -> -1, we do free the full partition table and do a
kvmhv_flush_lpid(), which eventually ends up calling
radix__flush_all_lpid(). These function names are kept in the comment
so that a new person looking at the code can easily follow the code
path.

-aneesh
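[Editor's note: for illustration, this is roughly what the downward forwarding Aneesh describes could look like from an L1 hypervisor, matching the register layout kvmppc_h_rpt_invalidate() decodes (r4=lpid, r5=target, r6=type, r7=pg_sizes, r8=start, r9=end). A hedged sketch, not code from the series: H_RPTI_PAGE_ALL is assumed from the series' hvcall.h additions, and whether H_RPTI_TYPE_NESTED_ALL already includes the H_RPTI_TYPE_NESTED routing bit is an assumption, so it is OR'd in explicitly.]

/* Sketch: L1 asks L0 to drop everything cached for nested LPID l2_lpid */
static long l1_flush_l2_partition_scope(unsigned long l2_lpid)
{
	unsigned long type = H_RPTI_TYPE_NESTED | H_RPTI_TYPE_NESTED_ALL;

	/* 0..-1 range: the L0 handler treats this as a full-LPID flush */
	return plpar_hcall_norets(H_RPT_INVALIDATE, l2_lpid,
				  H_RPTI_TARGET_CMMU, type,
				  H_RPTI_PAGE_ALL, 0UL, -1UL);
}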
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 8b33601cdb9d..a46fd37ad552 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -4,6 +4,10 @@
 
 #include <asm/hvcall.h>
 
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
 struct vm_area_struct;
 struct mm_struct;
 struct mmu_gather;
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index a6e9a5585e61..fdf54741c58c 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -307,6 +307,9 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
 void kvmhv_release_all_nested(struct kvm *kvm);
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
+long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
+			     unsigned long type, unsigned long pg_sizes,
+			     unsigned long start, unsigned long end);
 int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
 			  u64 time_limit, unsigned long lpcr);
 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index bcf34246bbe9..a2e7fbec796a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -925,6 +925,41 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
 	return yield_count;
 }
 
+/*
+ * H_RPT_INVALIDATE hcall handler for nested guests.
+ *
+ * Handles only nested process-scoped invalidation requests in L0.
+ */
+static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu)
+{
+	unsigned long type = kvmppc_get_gpr(vcpu, 6);
+	unsigned long pid, pg_sizes, start, end, psize;
+	struct mmu_psize_def *def;
+
+	/*
+	 * The partition-scoped invalidations aren't handled here in L0.
+	 */
+	if (type & H_RPTI_TYPE_NESTED)
+		return RESUME_HOST;
+
+	pid = kvmppc_get_gpr(vcpu, 4);
+	pg_sizes = kvmppc_get_gpr(vcpu, 7);
+	start = kvmppc_get_gpr(vcpu, 8);
+	end = kvmppc_get_gpr(vcpu, 9);
+
+	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+		def = &mmu_psize_defs[psize];
+		if (pg_sizes & def->h_rpt_pgsize)
+			do_h_rpt_invalidate_prt(pid,
+					vcpu->arch.nested->shadow_lpid,
+					type, (1UL << def->shift),
+					psize, start, end);
+	}
+
+	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+	return RESUME_GUEST;
+}
+
 static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
 				    unsigned long id, unsigned long target,
 				    unsigned long type, unsigned long pg_sizes,
@@ -941,10 +976,18 @@ static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
 
 	/*
 	 * Partition-scoped invalidation for nested guests.
-	 * Not yet supported
 	 */
-	if (type & H_RPTI_TYPE_NESTED)
-		return H_P3;
+	if (type & H_RPTI_TYPE_NESTED) {
+		if (!nesting_enabled(vcpu->kvm))
+			return H_FUNCTION;
+
+		/* Support only cores as target */
+		if (target != H_RPTI_TARGET_CMMU)
+			return H_P2;
+
+		return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes,
+					       start, end);
+	}
 
 	/*
 	 * Process-scoped invalidation for L1 guests.
@@ -1639,6 +1682,23 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
 		if (!xics_on_xive())
 			kvmppc_xics_rm_complete(vcpu, 0);
 		break;
+	case BOOK3S_INTERRUPT_SYSCALL:
+	{
+		unsigned long req = kvmppc_get_gpr(vcpu, 3);
+
+		/*
+		 * The H_RPT_INVALIDATE hcalls issued by nested
+		 * guests for process-scoped invalidations when
+		 * GTSE=0, are handled here in L0.
+		 */
+		if (req == H_RPT_INVALIDATE) {
+			r = kvmppc_nested_h_rpt_invalidate(vcpu);
+			break;
+		}
+
+		r = RESUME_HOST;
+		break;
+	}
 	default:
 		r = RESUME_HOST;
 		break;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 60724f674421..91f10290130d 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -1214,6 +1214,110 @@ long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
 	return H_SUCCESS;
 }
 
+static long do_tlb_invalidate_nested_tlb(struct kvm_vcpu *vcpu,
+					 unsigned long lpid,
+					 unsigned long page_size,
+					 unsigned long ap,
+					 unsigned long start,
+					 unsigned long end)
+{
+	unsigned long addr = start;
+	int ret;
+
+	do {
+		ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap,
+						   get_epn(addr));
+		if (ret)
+			return ret;
+		addr += page_size;
+	} while (addr < end);
+
+	return ret;
+}
+
+static long do_tlb_invalidate_nested_all(struct kvm_vcpu *vcpu,
+					 unsigned long lpid, unsigned long ric)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_nested_guest *gp;
+
+	gp = kvmhv_get_nested(kvm, lpid, false);
+	if (gp) {
+		kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
+		kvmhv_put_nested(gp);
+	}
+	return H_SUCCESS;
+}
+
+/*
+ * Performs partition-scoped invalidations for nested guests
+ * as part of H_RPT_INVALIDATE hcall.
+ */
+long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
+			     unsigned long type, unsigned long pg_sizes,
+			     unsigned long start, unsigned long end)
+{
+	struct kvm_nested_guest *gp;
+	long ret;
+	unsigned long psize, ap;
+
+	/*
+	 * If L2 lpid isn't valid, we need to return H_PARAMETER.
+	 *
+	 * However, nested KVM issues a L2 lpid flush call when creating
+	 * partition table entries for L2. This happens even before the
+	 * corresponding shadow lpid is created in HV which happens in
+	 * H_ENTER_NESTED call. Since we can't differentiate this case from
+	 * the invalid case, we ignore such flush requests and return success.
+	 */
+	gp = kvmhv_find_nested(vcpu->kvm, lpid);
+	if (!gp)
+		return H_SUCCESS;
+
+	/*
+	 * A flush all request can be handled by a full lpid flush only.
+	 */
+	if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL)
+		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);
+
+#if 0
+	/*
+	 * We don't need to handle a PWC flush like process table here,
+	 * because intermediate partition scoped table in nested guest doesn't
+	 * really have PWC. Only level we have PWC is in L0 and for nested
+	 * invalidate at L0 we always do kvm_flush_lpid() which does
+	 * radix__flush_all_lpid(). For range invalidate at any level, we
+	 * are not removing the higher level page tables and hence there is
+	 * no PWC invalidate needed.
+	 */
+	if (type & H_RPTI_TYPE_PWC) {
+		ret = do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_PWC);
+		if (ret)
+			return H_P4;
+	}
+#endif
+
+	if (start == 0 && end == -1)
+		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB);
+
+	if (type & H_RPTI_TYPE_TLB) {
+		struct mmu_psize_def *def;
+
+		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+			def = &mmu_psize_defs[psize];
+			if (!(pg_sizes & def->h_rpt_pgsize))
+				continue;
+
+			ret = do_tlb_invalidate_nested_tlb(vcpu, lpid,
+						(1UL << def->shift),
+						ap, start, end);
+			if (ret)
+				return H_P4;
+		}
+	}
+	return H_SUCCESS;
+}
+
 /* Used to convert a nested guest real address to a L1 guest real address */
 static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
 				       struct kvm_nested_guest *gp,
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 65aad9ce3557..613198c41006 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -20,10 +20,6 @@
 
 #include "internal.h"
 
-#define RIC_FLUSH_TLB 0
-#define RIC_FLUSH_PWC 1
-#define RIC_FLUSH_ALL 2
-
 /*
  * tlbiel instruction for radix, set invalidation
  * i.e., r=1 and is=01 or is=10 or is=11
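[Editor's note: for completeness, a sketch of the other direction the patch handles: a nested guest running with GTSE=0 issuing a process-scoped flush of one of its own PIDs, which traps to L0 and lands in kvmppc_nested_h_rpt_invalidate() via the BOOK3S_INTERRUPT_SYSCALL case above. Again a hedged sketch, not code from the series; H_RPTI_PAGE_ALL is assumed from the series' guest-side hvcall.h definitions.]

/* Sketch: process-scoped TLB flush of a PID from a nested guest */
static long nested_guest_flush_pid(unsigned long pid)
{
	/*
	 * No H_RPTI_TYPE_NESTED bit here: this is a process-scoped
	 * request, so L0 handles it directly instead of forwarding.
	 */
	return plpar_hcall_norets(H_RPT_INVALIDATE, pid,
				  H_RPTI_TARGET_CMMU, H_RPTI_TYPE_TLB,
				  H_RPTI_PAGE_ALL, 0UL, -1UL);
}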