@@ -366,12 +366,9 @@ void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
- if (cpu->created && !qemu_cpu_is_self(cpu)) {
- async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
- RUN_ON_CPU_HOST_INT(idxmap));
- } else {
- tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
- }
+ assert_cpu_is_self(cpu);
+
+ tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
}
void tlb_flush(CPUState *cpu)
@@ -560,28 +557,12 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
{
tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
+ assert_cpu_is_self(cpu);
+
/* This should already be page aligned */
addr &= TARGET_PAGE_MASK;
- if (qemu_cpu_is_self(cpu)) {
- tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
- } else if (idxmap < TARGET_PAGE_SIZE) {
- /*
- * Most targets have only a few mmu_idx. In the case where
- * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
- * allocating memory for this operation.
- */
- async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
- RUN_ON_CPU_TARGET_PTR(addr | idxmap));
- } else {
- TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
-
- /* Otherwise allocate a structure, freed by the worker. */
- d->addr = addr;
- d->idxmap = idxmap;
- async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
- RUN_ON_CPU_HOST_PTR(d));
- }
+ tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}
void tlb_flush_page(CPUState *cpu, vaddr addr)
@@ -744,6 +725,8 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
{
TLBFlushRangeData d;
+ assert_cpu_is_self(cpu);
+
/*
* If all bits are significant, and len is small,
* this devolves to tlb_flush_page.
@@ -764,14 +747,7 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
d.idxmap = idxmap;
d.bits = bits;
- if (qemu_cpu_is_self(cpu)) {
- tlb_flush_range_by_mmuidx_async_0(cpu, d);
- } else {
- /* Otherwise allocate a structure, freed by the worker. */
- TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
- async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
- RUN_ON_CPU_HOST_PTR(p));
- }
+ tlb_flush_range_by_mmuidx_async_0(cpu, d);
}
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
Some TLB flush operations can flush other CPUs. The problem with this
is that they used non-synced variants of the flushes (i.e., variants
that return before the destination CPU has completed the flush). Since
all TLB flush users need the synced variants, and the last user of the
non-synced flush was buggy, this is a footgun waiting to go off. There
do not seem to be any callers that flush other CPUs, so remove the
capability.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 accel/tcg/cputlb.c | 42 +++++++++---------------------------------
 1 file changed, 9 insertions(+), 33 deletions(-)
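
For illustration only (not part of this patch): after this change,
tlb_flush_by_mmuidx() asserts it runs on its own vCPU thread, so any
future caller that wants to flush a remote vCPU would have to bounce
the work to that vCPU itself, or use the *_all_cpus_synced variants.
A minimal sketch under that assumption, with a hypothetical
flush_mmuidx_on_cpu() helper:

/*
 * Hypothetical helper, not in the tree: run a per-mmuidx TLB flush
 * on the vCPU that owns the TLB, from any thread.
 */
static void flush_mmuidx_work(CPUState *cpu, run_on_cpu_data data)
{
    /* Executes on @cpu's thread, so the assert is satisfied. */
    tlb_flush_by_mmuidx(cpu, data.host_int);
}

static void flush_mmuidx_on_cpu(CPUState *cpu, uint16_t idxmap)
{
    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_by_mmuidx(cpu, idxmap);
    } else {
        /* run_on_cpu() waits for completion, i.e. a synced flush. */
        run_on_cpu(cpu, flush_mmuidx_work, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

Using the blocking run_on_cpu() here (rather than async_run_on_cpu(),
which the removed code used) is exactly the "synced" behaviour the
commit message says all remaining users need.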