[v2,5/9] migration: check required subsections are loaded, once

Message ID 20231024084043.2926316-6-marcandre.lureau@redhat.com
State New
Series RFC: migration: check required entries and sections are loaded

Commit Message

Marc-André Lureau Oct. 24, 2023, 8:40 a.m. UTC
From: Marc-André Lureau <marcandre.lureau@redhat.com>

Check that required subsections have been loaded.

Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
---
 migration/vmstate.c | 40 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 38 insertions(+), 2 deletions(-)

Comments

Juan Quintela Oct. 24, 2023, 10:41 a.m. UTC | #1
marcandre.lureau@redhat.com wrote:
> From: Marc-André Lureau <marcandre.lureau@redhat.com>
>
> Check that required subsections have been loaded.
>
> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>

Reviewed-by: Juan Quintela <quintela@redhat.com>

I will let other people comment on this before merging.

I can see the potential problem that Peter mentioned: we still don't
have enough state.

But I can also see the problem that you are trying to fix: a needed
subsection didn't come.

> @@ -492,7 +521,7 @@ static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
>              /* it doesn't have a valid subsection name */
>              return 0;
>          }
> -        sub_vmsd = vmstate_get_subsection(vmsd->subsections, idstr);
> +        sub_vmsd = vmstate_get_subsection(vmsd->subsections, idstr, visited);
>          if (sub_vmsd == NULL) {
>              trace_vmstate_subsection_load_bad(vmsd->name, idstr, "(lookup)");
>              return -ENOENT;

I fully agree that a given subsection shouldn't be loaded more than once.
The part of the patch needed for that can go in at any point.
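
For illustration only (not part of the patch or this mail), a minimal
standalone sketch of the duplicate-detection idea: a visited[] array
indexed in parallel with the NULL-terminated subsection list, rejecting
a name that is unknown or has already been seen.  The names below are
made up.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static const char *subsections[] = { "dev/subA", "dev/subB", NULL };

/* Accept a known subsection name only once. */
static bool lookup_once(const char *idstr, bool *visited)
{
    for (int i = 0; subsections[i]; i++) {
        if (strcmp(idstr, subsections[i]) == 0) {
            if (visited[i]) {
                return false;   /* same subsection seen twice in the stream */
            }
            visited[i] = true;
            return true;
        }
    }
    return false;               /* unknown subsection name */
}

int main(void)
{
    bool visited[2] = { false, false };
    /* A malformed stream that carries the same subsection twice. */
    const char *stream[] = { "dev/subA", "dev/subA" };

    for (size_t i = 0; i < 2; i++) {
        if (!lookup_once(stream[i], visited)) {
            printf("rejected: %s\n", stream[i]);
            return 1;
        }
    }
    return 0;
}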



> @@ -509,6 +538,13 @@ static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
>          }
>      }
>  
> +    for (i = 0; i < n; i++) {
> +        if (!visited[i] && vmstate_section_needed(vmsd->subsections[i], opaque)) {
> +            trace_vmstate_subsection_load_bad(vmsd->name, vmsd->subsections[i]->name, "(not visited)");
> +            return -ENOENT;
> +        }
> +    }
> +
>      trace_vmstate_subsection_load_good(vmsd->name);
>      return 0;
>  }

This part is the only one where I can see there could be some
discussion.  So I will wait to see what other people think.

Later, Juan.

Peter Xu Oct. 24, 2023, 8:10 p.m. UTC | #2
On Tue, Oct 24, 2023 at 12:41:56PM +0200, Juan Quintela wrote:
> > @@ -509,6 +538,13 @@ static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
> >          }
> >      }
> >  
> > +    for (i = 0; i < n; i++) {
> > +        if (!visited[i] && vmstate_section_needed(vmsd->subsections[i], opaque)) {
> > +            trace_vmstate_subsection_load_bad(vmsd->name, vmsd->subsections[i]->name, "(not visited)");
> > +            return -ENOENT;
> > +        }
> > +    }
> > +
> >      trace_vmstate_subsection_load_good(vmsd->name);
> >      return 0;
> >  }
> 
> This part is the only one where I can see there could be some
> discussion.  So I will wait to see what other people think.

Previous email:

https://lore.kernel.org/qemu-devel/ZR2P1RbxCfBdYBaQ@x1n/

I still think it is safer not to fail unless we can justify that we
won't hit surprises in the ->needed() callbacks.  There are a lot of
them, so I assume that is non-trivial to justify.
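
To make the concern concrete, this is the general shape of a ->needed()
callback; the device and fields below are hypothetical, but many real
callbacks similarly read runtime state that may not be fully restored on
the incoming side at the point vmstate_subsection_load() would call them.

#include <stdbool.h>

/* Hypothetical device state, for illustration only. */
typedef struct FooDeviceState {
    bool irq_pending;
    bool msi_enabled;
} FooDeviceState;

/* A typical ->needed() callback: it decides from live device state
 * whether the subsection has to be present in the stream. */
static bool foo_irq_state_needed(void *opaque)
{
    FooDeviceState *s = opaque;

    return s->irq_pending || s->msi_enabled;
}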

We can switch the tracepoint to error_report() in that case, though, as
long as it won't fail the migration.
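
For illustration, one possible shape of that softer check, applied to
the loop from the hunk quoted above (an assumption about the fix, not
what was posted; error_report() is the existing helper from
include/qemu/error-report.h):

    for (i = 0; i < n; i++) {
        if (!visited[i] && vmstate_section_needed(vmsd->subsections[i], opaque)) {
            /* Warn loudly, but keep loading instead of failing the migration. */
            error_report("%s: expected subsection %s was not in the migration stream",
                         vmsd->name, vmsd->subsections[i]->name);
        }
    }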

Thanks,

Patch

diff --git a/migration/vmstate.c b/migration/vmstate.c
index 16e33a5d34..d6fe38a5e1 100644
--- a/migration/vmstate.c
+++ b/migration/vmstate.c
@@ -451,22 +451,51 @@  int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
 }
 
 static const VMStateDescription *
-vmstate_get_subsection(const VMStateDescription **sub, char *idstr)
+vmstate_get_subsection(const VMStateDescription **sub, char *idstr, bool *visited)
 {
+    size_t i = 0;
+
     while (sub && *sub) {
         if (strcmp(idstr, (*sub)->name) == 0) {
+            if (visited[i]) {
+                return NULL;
+            }
+            visited[i] = true;
             return *sub;
         }
+        i++;
         sub++;
     }
     return NULL;
 }
 
+static size_t
+vmstate_get_n_subsections(const VMStateDescription **sub)
+{
+    size_t n = 0;
+
+    if (!sub) {
+        return 0;
+    }
+
+    while (sub[n]) {
+        n++;
+    }
+
+    return n;
+}
+
 static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
                                    void *opaque)
 {
+    size_t i, n;
+    g_autofree bool *visited = NULL;
+
     trace_vmstate_subsection_load(vmsd->name);
 
+    n = vmstate_get_n_subsections(vmsd->subsections);
+    visited = g_new0(bool, n);
+
     while (qemu_peek_byte(f, 0) == QEMU_VM_SUBSECTION) {
         char idstr[256], *idstr_ret;
         int ret;
@@ -492,7 +521,7 @@  static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
             /* it doesn't have a valid subsection name */
             return 0;
         }
-        sub_vmsd = vmstate_get_subsection(vmsd->subsections, idstr);
+        sub_vmsd = vmstate_get_subsection(vmsd->subsections, idstr, visited);
         if (sub_vmsd == NULL) {
             trace_vmstate_subsection_load_bad(vmsd->name, idstr, "(lookup)");
             return -ENOENT;
@@ -509,6 +538,13 @@  static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
         }
     }
 
+    for (i = 0; i < n; i++) {
+        if (!visited[i] && vmstate_section_needed(vmsd->subsections[i], opaque)) {
+            trace_vmstate_subsection_load_bad(vmsd->name, vmsd->subsections[i]->name, "(not visited)");
+            return -ENOENT;
+        }
+    }
+
     trace_vmstate_subsection_load_good(vmsd->name);
     return 0;
 }
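
Note: vmstate_section_needed(), called in the last hunk, is not defined
by this patch; it is added earlier in this series.  A plausible minimal
form, mirroring the existing vmstate_save_needed() check in
migration/vmstate.c and using the types from migration/vmstate.h, would
be (an assumption, not quoted from the series):

static bool vmstate_section_needed(const VMStateDescription *vmsd, void *opaque)
{
    /* A subsection without a ->needed() callback is always required. */
    return !vmsd->needed || vmsd->needed(opaque);
}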