
[1/2] accel/tcg: fix start page passed to tb_invalidate_phys_page_range__locked()

Message ID 20230629082522.606219-2-mark.cave-ayland@ilande.co.uk
State New
Series accel/tcg: fix page invalidation in tb_invalidate_phys_range()

Commit Message

Mark Cave-Ayland June 29, 2023, 8:25 a.m. UTC
Due to a copy-paste error in tb_invalidate_phys_range() the start address of the
invalidation range was being passed to tb_invalidate_phys_page_range__locked()
instead of the start address of the current page.
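
To make the per-page bounds concrete, here is a purely illustrative,
standalone sketch (not part of the patch) of the computation the loop
performs; it assumes 4 KiB target pages (TARGET_PAGE_BITS = 12) and
redefines the macros locally just for the example:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the QEMU definitions. */
#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK ((uint64_t)-1 << TARGET_PAGE_BITS)
#define MIN(a, b)        ((a) < (b) ? (a) : (b))

int main(void)
{
    uint64_t start = 0x1800, last = 0x2800;   /* range spanning two pages */
    uint64_t index_last = last >> TARGET_PAGE_BITS;

    for (uint64_t index = start >> TARGET_PAGE_BITS;
         index <= index_last; index++) {
        uint64_t page_start = index << TARGET_PAGE_BITS;
        uint64_t page_last = MIN(page_start | ~TARGET_PAGE_MASK, last);

        /*
         * Before the fix the callee was always handed 'start' (0x1800)
         * as the lower bound, even for the second page, i.e. a range
         * not confined to the page being processed.
         */
        printf("page %#" PRIx64 ": invalidate [%#" PRIx64 ", %#" PRIx64 "]\n",
               index, page_start, page_last);
    }
    return 0;
}

With these inputs the sketch prints [0x1000, 0x1fff] for the first page
and [0x2000, 0x2800] for the second, matching the per-page bounds now
passed to tb_invalidate_phys_page_range__locked().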

Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Fixes: e506ad6a05 ("accel/tcg: Pass last not end to tb_invalidate_phys_range")
---
 accel/tcg/tb-maint.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

Comments

Cédric Le Goater June 29, 2023, 9:08 a.m. UTC | #1
On 6/29/23 10:25, Mark Cave-Ayland wrote:
> Due to a copy-paste error in tb_invalidate_phys_range() the start address of the
> invalidation range was being passed to tb_invalidate_phys_page_range__locked()
> instead of the start address of the current page.
> 
> Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
> Fixes: e506ad6a05 ("accel/tcg: Pass last not end to tb_invalidate_phys_range")


Cc: qemu-stable@nongnu.org


Patch

diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index 3541419845..33ea1aadd1 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -1182,15 +1182,17 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
     index_last = last >> TARGET_PAGE_BITS;
     for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) {
         PageDesc *pd = page_find(index);
-        tb_page_addr_t bound;
+        tb_page_addr_t page_start, page_last;
 
         if (pd == NULL) {
             continue;
         }
         assert_page_locked(pd);
-        bound = (index << TARGET_PAGE_BITS) | ~TARGET_PAGE_MASK;
-        bound = MIN(bound, last);
-        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
+        page_start = index << TARGET_PAGE_BITS;
+        page_last = page_start | ~TARGET_PAGE_MASK;
+        page_last = MIN(page_last, last);
+        tb_invalidate_phys_page_range__locked(pages, pd,
+                                              page_start, page_last, 0);
     }
     page_collection_unlock(pages);
 }