diff mbox series

[v6,2/2] target: arm: Add support for VCPU event states

Message ID 1535039100-11248-2-git-send-email-gengdongjiu@huawei.com
State New
Headers show
Series [v6,1/2] linux-headers: Update to kernel mainline commit 815f0ddb3 | expand

Commit Message

Dongjiu Geng Aug. 23, 2018, 3:45 p.m. UTC
This patch extends the qemu-kvm state sync logic with support for
KVM_GET/SET_VCPU_EVENTS, giving access to the previously missing SError
exception state. It also enables migration of that exception state.

Signed-off-by: Dongjiu Geng <gengdongjiu@huawei.com>
---
Change since v5:
address Peter's comments:
1. Move the "struct serror" before the "end_reset_fields" in CPUARMState
2. Remove ARM_FEATURE_RAS_EXT and add a variable have_inject_serror_esr
3. Use the variable have_inject_serror_esr to track whether the kernel has state we need to migrate
4. Remove printf() in kvm_arch_put_registers()
5. ras_needed/vmstate_ras to serror_needed/vmstate_serror
6. Check to use "return env.serror.pending != 0" instead of "arm_feature(env, ARM_FEATURE_RAS_EXT)" in the ras_needed()

Change since v4:
1. Rebase the code to latest

Change since v3:
1. Add a new subsection with a suitable 'ras_needed' function
   controlling whether it is present
2. Add a ARM_FEATURE_RAS feature bit for CPUARMState
---
 target/arm/cpu.h     |  7 +++++++
 target/arm/kvm64.c   | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 target/arm/machine.c | 22 ++++++++++++++++++++
 3 files changed, 88 insertions(+)

Comments

Peter Maydell Aug. 23, 2018, 4:52 p.m. UTC | #1
On 23 August 2018 at 16:45, Dongjiu Geng <gengdongjiu@huawei.com> wrote:
> This patch extends the qemu-kvm state sync logic with support for
> KVM_GET/SET_VCPU_EVENTS, giving access to yet missing SError exception.
> And also it can support the exception state migration.
>
> Signed-off-by: Dongjiu Geng <gengdongjiu@huawei.com>

Did you forget to send a cover letter for this patchset?
I didn't see one. Our tooling prefers cover letters for
any patchset with more than one patch in it.

> ---
> Change since v5:
> address Peter's comments:
> 1. Move the "struct serror" before the "end_reset_fields" in CPUARMState
> 2. Remove ARM_FEATURE_RAS_EXT and add a variable have_inject_serror_esr
> 3. Use the variable have_inject_serror_esr to track whether the kernel has state we need to migrate
> 4. Remove printf() in kvm_arch_put_registers()
> 5. ras_needed/vmstate_ras to serror_needed/vmstate_serror
> 6. Check to use "return env.serror.pending != 0" instead of "arm_feature(env, ARM_FEATURE_RAS_EXT)" in the ras_needed()
>
> Change since v4:
> 1. Rebase the code to latest
>
> Change since v3:
> 1. Add a new new subsection with a suitable 'ras_needed' function
>    controlling whether it is present
> 2. Add a ARM_FEATURE_RAS feature bit for CPUARMState
> ---
>  target/arm/cpu.h     |  7 +++++++
>  target/arm/kvm64.c   | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  target/arm/machine.c | 22 ++++++++++++++++++++
>  3 files changed, 88 insertions(+)


> +static int kvm_put_vcpu_events(ARMCPU *cpu)
> +{
> +    CPUARMState *env = &cpu->env;
> +    struct kvm_vcpu_events events = {};
> +
> +    if (!kvm_has_vcpu_events()) {
> +        return 0;
> +    }
> +
> +    memset(&events, 0, sizeof(events));
> +    events.exception.serror_pending = env->serror.pending;
> +
> +    if (have_inject_serror_esr) {
> +        events.exception.serror_has_esr = env->serror.has_esr;
> +        events.exception.serror_esr = env->serror.esr;
> +    }

I realised that the effect of this condition is that
if we migrate a VM from a machine which supports specifying the
SError ESR to one which does not, and at the point of migration
there is a pending SError with an ESR value, then we will
silently drop the specified ESR value. The other alternative
would be to fail the migration (by dropping the if() check,
and letting the host kernel fail the ioctl if that meant that
we asked it to set an SError ESR it couldn't manage.)

I guess that's OK? It's all hypothetical currently since
we don't support migration between different host CPU types.

> +
> +    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
> +}
> +

> +static const VMStateDescription vmstate_serror = {
> +    .name = "cpu/ras",

You forgot to update the name here.

> +    .version_id = 1,
> +    .minimum_version_id = 1,
> +    .needed = serror_needed,
> +    .fields = (VMStateField[]) {
> +        VMSTATE_UINT32(env.serror.pending, ARMCPU),
> +        VMSTATE_UINT32(env.serror.has_esr, ARMCPU),
> +        VMSTATE_UINT64(env.serror.esr, ARMCPU),
> +        VMSTATE_END_OF_LIST()
> +    }
> +};
> +
>  static bool m_needed(void *opaque)
>  {
>      ARMCPU *cpu = opaque;
> @@ -726,6 +747,7 @@ const VMStateDescription vmstate_arm_cpu = {
>  #ifdef TARGET_AARCH64
>          &vmstate_sve,
>  #endif
> +        &vmstate_serror,
>          NULL
>      }
>  };
> --

thanks
-- PMM
Dongjiu Geng Aug. 24, 2018, 10:28 a.m. UTC | #2
On 2018/8/24 0:52, Peter Maydell wrote:
> On 23 August 2018 at 16:45, Dongjiu Geng <gengdongjiu@huawei.com> wrote:
>> This patch extends the qemu-kvm state sync logic with support for
>> KVM_GET/SET_VCPU_EVENTS, giving access to yet missing SError exception.
>> And also it can support the exception state migration.
>>
>> Signed-off-by: Dongjiu Geng <gengdongjiu@huawei.com>
> 
> Did you forget to send a cover letter for this patchset?
> I didn't see one. Our tooling prefers cover letters for
> any patchset with more than one patch in it.

Ok, I will add a cover letter.

> 
>> ---
>> Change since v5:
>> address Peter's comments:
>> 1. Move the "struct serror" before the "end_reset_fields" in CPUARMState
>> 2. Remove ARM_FEATURE_RAS_EXT and add a variable have_inject_serror_esr
>> 3. Use the variable have_inject_serror_esr to track whether the kernel has state we need to migrate
>> 4. Remove printf() in kvm_arch_put_registers()
>> 5. ras_needed/vmstate_ras to serror_needed/vmstate_serror
>> 6. Check to use "return env.serror.pending != 0" instead of "arm_feature(env, ARM_FEATURE_RAS_EXT)" in the ras_needed()
>>
>> Change since v4:
>> 1. Rebase the code to latest
>>
>> Change since v3:
>> 1. Add a new new subsection with a suitable 'ras_needed' function
>>    controlling whether it is present
>> 2. Add a ARM_FEATURE_RAS feature bit for CPUARMState
>> ---
>>  target/arm/cpu.h     |  7 +++++++
>>  target/arm/kvm64.c   | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>>  target/arm/machine.c | 22 ++++++++++++++++++++
>>  3 files changed, 88 insertions(+)
> 
> 
>> +static int kvm_put_vcpu_events(ARMCPU *cpu)
>> +{
>> +    CPUARMState *env = &cpu->env;
>> +    struct kvm_vcpu_events events = {};
>> +
>> +    if (!kvm_has_vcpu_events()) {
>> +        return 0;
>> +    }
>> +
>> +    memset(&events, 0, sizeof(events));
>> +    events.exception.serror_pending = env->serror.pending;
>> +
>> +    if (have_inject_serror_esr) {
>> +        events.exception.serror_has_esr = env->serror.has_esr;
>> +        events.exception.serror_esr = env->serror.esr;
>> +    }
> 
> I realised that the effect of this condition is that
> if we migrate a VM from a machine which supports specifying the
> SError ESR to one which does not, and at the point of migration
> there is a pending SError with an ESR value, then we will
> silently drop the specified ESR value. The other alternative
> would be to fail the migration (by dropping the if() check,
> and letting the host kernel fail the ioctl if that meant that
> we asked it to set an SError ESR it couldn't manage.)
> 
> I guess that's OK? It's all hypothetical currently since
> we don't support migration between different host CPU types.

Peter,
   there are two pieces of state that need to be migrated: one is serror_pending, the other is the SError ESR value.

Suppose A migrates to B, where A can set an SError ESR but B does not support setting one.
When A has a pending SError and needs to migrate to B, I think it should still be possible to migrate the serror_pending status without the ESR value (i.e. with the ESR value 0).
That is to say, if A has a pending SError, then after migrating to B, B should also pend an SError.

Or do you think we should refuse this migration?

Currently the kernel supports pending an SError without an ESR value,
as shown in the kernel code in [1].
When has_esr is true, the ioctl can return failure, so QEMU can check the return value to decide whether to do this migration;
but when has_esr is false (no ESR is set), QEMU cannot use the return value for that check, because the ioctl always returns success.


[1]:
+int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+                       struct kvm_vcpu_events *events)
+{
+       int i;
+       bool serror_pending = events->exception.serror_pending;
+       bool has_esr = events->exception.serror_has_esr;
+
+       /* check whether the reserved field is zero */
+       for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
+               if (events->reserved[i])
+                       return -EINVAL;
+
+       /* check whether the pad field is zero */
+       for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
+               if (events->exception.pad[i])
+                       return -EINVAL;
+
+       if (serror_pending && has_esr) {
+               if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+                       return -EINVAL;
+
+               if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
+                       kvm_set_sei_esr(vcpu, events->exception.serror_esr);
+               else
+                       return -EINVAL;
+       } else if (serror_pending) {
+               kvm_inject_vabt(vcpu);
+       }
+
+       return 0;
+}

> 
>> +
>> +    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
>> +}
>> +
> 
>> +static const VMStateDescription vmstate_serror = {
>> +    .name = "cpu/ras",
> 
> You forgot to update the name here.

Thanks for this pointing out, will change it

> 
>> +    .version_id = 1,
>> +    .minimum_version_id = 1,
>> +    .needed = serror_needed,
>> +    .fields = (VMStateField[]) {
>> +        VMSTATE_UINT32(env.serror.pending, ARMCPU),
>> +        VMSTATE_UINT32(env.serror.has_esr, ARMCPU),
>> +        VMSTATE_UINT64(env.serror.esr, ARMCPU),
>> +        VMSTATE_END_OF_LIST()
>> +    }
>> +};
>> +
>>  static bool m_needed(void *opaque)
>>  {
>>      ARMCPU *cpu = opaque;
>> @@ -726,6 +747,7 @@ const VMStateDescription vmstate_arm_cpu = {
>>  #ifdef TARGET_AARCH64
>>          &vmstate_sve,
>>  #endif
>> +        &vmstate_serror,
>>          NULL
>>      }
>>  };
>> --
> 
> thanks
> -- PMM
> 
> .
>
Peter Maydell Aug. 24, 2018, 10:38 a.m. UTC | #3
On 24 August 2018 at 11:28, gengdongjiu <gengdongjiu@huawei.com> wrote:
> On 2018/8/24 0:52, Peter Maydell wrote:
>> On 23 August 2018 at 16:45, Dongjiu Geng <gengdongjiu@huawei.com> wrote:
>>> +static int kvm_put_vcpu_events(ARMCPU *cpu)
>>> +{
>>> +    CPUARMState *env = &cpu->env;
>>> +    struct kvm_vcpu_events events = {};
>>> +
>>> +    if (!kvm_has_vcpu_events()) {
>>> +        return 0;
>>> +    }
>>> +
>>> +    memset(&events, 0, sizeof(events));
>>> +    events.exception.serror_pending = env->serror.pending;
>>> +
>>> +    if (have_inject_serror_esr) {
>>> +        events.exception.serror_has_esr = env->serror.has_esr;
>>> +        events.exception.serror_esr = env->serror.esr;
>>> +    }
>>
>> I realised that the effect of this condition is that
>> if we migrate a VM from a machine which supports specifying the
>> SError ESR to one which does not, and at the point of migration
>> there is a pending SError with an ESR value, then we will
>> silently drop the specified ESR value. The other alternative
>> would be to fail the migration (by dropping the if() check,
>> and letting the host kernel fail the ioctl if that meant that
>> we asked it to set an SError ESR it couldn't manage.)
>>
>> I guess that's OK? It's all hypothetical currently since
>> we don't support migration between different host CPU types.
>
> Peter,
>    there are two status needed to migrate, one is serror_pending, another is SError ESR value.
>
> If A migrates to B, A can set an SError ESR, but B does not support to set.
> when A is pending a SError and need to migrate to B, I think it should support to migrate the serror_pending status without the ESR value(the ESR value is 0).
> That is to say,  if A is pending a SError, when migrate to B, B should also pend a SError.
>
> or do you think we should refused this migration?

I don't know, that's why I asked. If we have a pending
SError with an ESR, and we end up on a destination machine
where we can pend the SError but not the ESR, does that
make sense (ie will the guest still be able to usefully
continue), or have we thrown away information that the
guest requires to be able to usefully use the SError ?
Presumably the ESR is important, or we could just never
bother to set it when pending SErrors.

thanks
-- PMM
Dongjiu Geng Aug. 24, 2018, 10:49 a.m. UTC | #4
On 2018/8/24 18:38, Peter Maydell wrote:
> On 24 August 2018 at 11:28, gengdongjiu <gengdongjiu@huawei.com> wrote:
>> On 2018/8/24 0:52, Peter Maydell wrote:
>>> On 23 August 2018 at 16:45, Dongjiu Geng <gengdongjiu@huawei.com> wrote:
>>>> +static int kvm_put_vcpu_events(ARMCPU *cpu)
>>>> +{
>>>> +    CPUARMState *env = &cpu->env;
>>>> +    struct kvm_vcpu_events events = {};
>>>> +
>>>> +    if (!kvm_has_vcpu_events()) {
>>>> +        return 0;
>>>> +    }
>>>> +
>>>> +    memset(&events, 0, sizeof(events));
>>>> +    events.exception.serror_pending = env->serror.pending;
>>>> +
>>>> +    if (have_inject_serror_esr) {
>>>> +        events.exception.serror_has_esr = env->serror.has_esr;
>>>> +        events.exception.serror_esr = env->serror.esr;
>>>> +    }
>>>
>>> I realised that the effect of this condition is that
>>> if we migrate a VM from a machine which supports specifying the
>>> SError ESR to one which does not, and at the point of migration
>>> there is a pending SError with an ESR value, then we will
>>> silently drop the specified ESR value. The other alternative
>>> would be to fail the migration (by dropping the if() check,
>>> and letting the host kernel fail the ioctl if that meant that
>>> we asked it to set an SError ESR it couldn't manage.)
>>>
>>> I guess that's OK? It's all hypothetical currently since
>>> we don't support migration between different host CPU types.
>>
>> Peter,
>>    there are two status needed to migrate, one is serror_pending, another is SError ESR value.
>>
>> If A migrates to B, A can set an SError ESR, but B does not support to set.
>> when A is pending a SError and need to migrate to B, I think it should support to migrate the serror_pending status without the ESR value(the ESR value is 0).
>> That is to say,  if A is pending a SError, when migrate to B, B should also pend a SError.
>>
>> or do you think we should refused this migration?
> 
> I don't know, that's why I asked. If we have a pending
> SError with an ESR, and we end up on a destination machine
> where we can pend the SError but not the ESR, does that
> make sense (ie will the guest still be able to usefully
  I agree with this suggestion: pend the SError, but without the ESR, on the destination machine.

> continue), or have we thrown away information that the
> guest requires to be able to usefully use the SError ?
> Presumably the ESR is important, or we could just never
> bother to set it when pending SErrors.
> 
> thanks
> -- PMM
> 
> .
>
diff mbox series

Patch

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 62c36b4..7030680 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -530,6 +530,13 @@  typedef struct CPUARMState {
          */
     } exception;
 
+    /* Information associated with an SError */
+    struct {
+        uint32_t pending;
+        uint32_t has_esr;
+        uint64_t esr;
+    } serror;
+
     /* Thumb-2 EE state.  */
     uint32_t teecr;
     uint32_t teehbr;
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index e0b8246..3913143 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -29,6 +29,7 @@ 
 #include "hw/arm/arm.h"
 
 static bool have_guest_debug;
+static bool have_inject_serror_esr;
 
 /*
  * Although the ARM implementation of hardware assisted debugging
@@ -546,6 +547,10 @@  int kvm_arch_init_vcpu(CPUState *cs)
 
     kvm_arm_init_debug(cs);
 
+    /* Check whether userspace can specify guest syndrome value */
+    have_inject_serror_esr = kvm_check_extension(cs->kvm_state,
+                                                 KVM_CAP_ARM_INJECT_SERROR_ESR);
+
     return kvm_arm_init_cpreg_list(cpu);
 }
 
@@ -600,6 +605,50 @@  int kvm_arm_cpreg_level(uint64_t regidx)
 #define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                  KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
 
+static int kvm_put_vcpu_events(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    struct kvm_vcpu_events events = {};
+
+    if (!kvm_has_vcpu_events()) {
+        return 0;
+    }
+
+    memset(&events, 0, sizeof(events));
+    events.exception.serror_pending = env->serror.pending;
+
+    if (have_inject_serror_esr) {
+        events.exception.serror_has_esr = env->serror.has_esr;
+        events.exception.serror_esr = env->serror.esr;
+    }
+
+    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
+}
+
+static int kvm_get_vcpu_events(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    struct kvm_vcpu_events events;
+    int ret;
+
+    if (!kvm_has_vcpu_events()) {
+        return 0;
+    }
+
+    memset(&events, 0, sizeof(events));
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
+
+    if (ret < 0) {
+        return ret;
+    }
+
+    env->serror.pending = events.exception.serror_pending;
+    env->serror.has_esr = events.exception.serror_has_esr;
+    env->serror.esr = events.exception.serror_esr;
+
+    return 0;
+}
+
 int kvm_arch_put_registers(CPUState *cs, int level)
 {
     struct kvm_one_reg reg;
@@ -727,6 +776,11 @@  int kvm_arch_put_registers(CPUState *cs, int level)
         return ret;
     }
 
+    ret = kvm_put_vcpu_events(cpu);
+    if (ret) {
+        return ret;
+    }
+
     if (!write_list_to_kvmstate(cpu, level)) {
         return EINVAL;
     }
@@ -863,6 +917,11 @@  int kvm_arch_get_registers(CPUState *cs)
     }
     vfp_set_fpcr(env, fpr);
 
+    ret = kvm_get_vcpu_events(cpu);
+    if (ret) {
+        return ret;
+    }
+
     if (!write_kvmstate_to_list(cpu)) {
         return EINVAL;
     }
diff --git a/target/arm/machine.c b/target/arm/machine.c
index ff4ec22..1a58b5f 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -172,6 +172,27 @@  static const VMStateDescription vmstate_sve = {
 };
 #endif /* AARCH64 */
 
+static bool serror_needed(void *opaque)
+{
+    ARMCPU *cpu = opaque;
+    CPUARMState *env = &cpu->env;
+
+    return env->serror.pending != 0;
+}
+
+static const VMStateDescription vmstate_serror = {
+    .name = "cpu/ras",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = serror_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(env.serror.pending, ARMCPU),
+        VMSTATE_UINT32(env.serror.has_esr, ARMCPU),
+        VMSTATE_UINT64(env.serror.esr, ARMCPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static bool m_needed(void *opaque)
 {
     ARMCPU *cpu = opaque;
@@ -726,6 +747,7 @@  const VMStateDescription vmstate_arm_cpu = {
 #ifdef TARGET_AARCH64
         &vmstate_sve,
 #endif
+        &vmstate_serror,
         NULL
     }
 };