[PATCH] vcpu_dirty: share the same field in CPUState for all accelerators

Previous Topic Next Topic
 
classic Classic list List threaded Threaded
4 messages Options
Reply | Threaded
Open this post in threaded view
|

[PATCH] vcpu_dirty: share the same field in CPUState for all accelerators

Sergio Andres Gomez Del Real
This patch simply replaces the separate boolean fields in CPUState that
kvm, hax (and upcoming hvf) have for keeping track of vcpu dirtiness
with a single shared field.

Signed-off-by: Sergio Andres Gomez Del Real <[hidden email]>
---
 include/qom/cpu.h     |  5 +++--
 kvm-all.c             | 18 +++++++++---------
 target/i386/hax-all.c | 12 ++++++------
 target/mips/kvm.c     |  4 ++--
 4 files changed, 20 insertions(+), 19 deletions(-)

diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 89ddb686fb..2098eeae03 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -369,7 +369,6 @@ struct CPUState {
     vaddr mem_io_vaddr;
 
     int kvm_fd;
-    bool kvm_vcpu_dirty;
     struct KVMState *kvm_state;
     struct kvm_run *kvm_run;
 
@@ -386,6 +385,9 @@ struct CPUState {
     uint32_t can_do_io;
     int32_t exception_index; /* used by m68k TCG */
 
+    /* shared by kvm, hax and hvf */
+    bool vcpu_dirty;
+
     /* Used to keep track of an outstanding cpu throttle thread for migration
      * autoconverge
      */
@@ -400,7 +402,6 @@ struct CPUState {
         icount_decr_u16 u16;
     } icount_decr;
 
-    bool hax_vcpu_dirty;
     struct hax_vcpu_state *hax_vcpu;
 
     /* The pending_tlb_flush flag is set and cleared atomically to
diff --git a/kvm-all.c b/kvm-all.c
index ab8262f672..a5eaff270e 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -318,7 +318,7 @@ int kvm_init_vcpu(CPUState *cpu)
 
     cpu->kvm_fd = ret;
     cpu->kvm_state = s;
-    cpu->kvm_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;
 
     mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
     if (mmap_size < 0) {
@@ -1864,15 +1864,15 @@ void kvm_flush_coalesced_mmio_buffer(void)
 
 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
 {
-    if (!cpu->kvm_vcpu_dirty) {
+    if (!cpu->vcpu_dirty) {
         kvm_arch_get_registers(cpu);
-        cpu->kvm_vcpu_dirty = true;
+        cpu->vcpu_dirty = true;
     }
 }
 
 void kvm_cpu_synchronize_state(CPUState *cpu)
 {
-    if (!cpu->kvm_vcpu_dirty) {
+    if (!cpu->vcpu_dirty) {
         run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
     }
 }
@@ -1880,7 +1880,7 @@ void kvm_cpu_synchronize_state(CPUState *cpu)
 static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
 {
     kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
-    cpu->kvm_vcpu_dirty = false;
+    cpu->vcpu_dirty = false;
 }
 
 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
@@ -1891,7 +1891,7 @@ void kvm_cpu_synchronize_post_reset(CPUState *cpu)
 static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
 {
     kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
-    cpu->kvm_vcpu_dirty = false;
+    cpu->vcpu_dirty = false;
 }
 
 void kvm_cpu_synchronize_post_init(CPUState *cpu)
@@ -1901,7 +1901,7 @@ void kvm_cpu_synchronize_post_init(CPUState *cpu)
 
 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
 {
-    cpu->kvm_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;
 }
 
 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
@@ -1981,9 +1981,9 @@ int kvm_cpu_exec(CPUState *cpu)
     do {
         MemTxAttrs attrs;
 
-        if (cpu->kvm_vcpu_dirty) {
+        if (cpu->vcpu_dirty) {
             kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
-            cpu->kvm_vcpu_dirty = false;
+            cpu->vcpu_dirty = false;
         }
 
         kvm_arch_pre_run(cpu, run);
diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
index 097db5cae1..3ada8b54d4 100644
--- a/target/i386/hax-all.c
+++ b/target/i386/hax-all.c
@@ -232,7 +232,7 @@ int hax_init_vcpu(CPUState *cpu)
     }
 
     cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index];
-    cpu->hax_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;
     qemu_register_reset(hax_reset_vcpu_state, (CPUArchState *) (cpu->env_ptr));
 
     return ret;
@@ -598,12 +598,12 @@ static void do_hax_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
     CPUArchState *env = cpu->env_ptr;
 
     hax_arch_get_registers(env);
-    cpu->hax_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;
 }
 
 void hax_cpu_synchronize_state(CPUState *cpu)
 {
-    if (!cpu->hax_vcpu_dirty) {
+    if (!cpu->vcpu_dirty) {
         run_on_cpu(cpu, do_hax_cpu_synchronize_state, RUN_ON_CPU_NULL);
     }
 }
@@ -614,7 +614,7 @@ static void do_hax_cpu_synchronize_post_reset(CPUState *cpu,
     CPUArchState *env = cpu->env_ptr;
 
     hax_vcpu_sync_state(env, 1);
-    cpu->hax_vcpu_dirty = false;
+    cpu->vcpu_dirty = false;
 }
 
 void hax_cpu_synchronize_post_reset(CPUState *cpu)
@@ -627,7 +627,7 @@ static void do_hax_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
     CPUArchState *env = cpu->env_ptr;
 
     hax_vcpu_sync_state(env, 1);
-    cpu->hax_vcpu_dirty = false;
+    cpu->vcpu_dirty = false;
 }
 
 void hax_cpu_synchronize_post_init(CPUState *cpu)
@@ -637,7 +637,7 @@ void hax_cpu_synchronize_post_init(CPUState *cpu)
 
 static void do_hax_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
 {
-    cpu->hax_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;
 }
 
 void hax_cpu_synchronize_pre_loadvm(CPUState *cpu)
diff --git a/target/mips/kvm.c b/target/mips/kvm.c
index 0982e874bb..3317905e71 100644
--- a/target/mips/kvm.c
+++ b/target/mips/kvm.c
@@ -523,7 +523,7 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state)
      * already saved and can be restored when it is synced back to KVM.
      */
     if (!running) {
-        if (!cs->kvm_vcpu_dirty) {
+        if (!cs->vcpu_dirty) {
             ret = kvm_mips_save_count(cs);
             if (ret < 0) {
                 fprintf(stderr, "Failed saving count\n");
@@ -539,7 +539,7 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state)
             return;
         }
 
-        if (!cs->kvm_vcpu_dirty) {
+        if (!cs->vcpu_dirty) {
             ret = kvm_mips_restore_count(cs);
             if (ret < 0) {
                 fprintf(stderr, "Failed restoring count\n");
--
2.13.0


Reply | Threaded
Open this post in threaded view
|

Re: [PATCH] vcpu_dirty: share the same field in CPUState for all accelerators

Paolo Bonzini-5


On 18/06/2017 21:11, Sergio Andres Gomez Del Real wrote:

> This patch simply replaces the separate boolean field in CPUState that
> kvm, hax (and upcoming hvf) have for keeping track of vcpu dirtiness
> with a single shared field.
>
> Signed-off-by: Sergio Andres Gomez Del Real <[hidden email]>
> ---
>  include/qom/cpu.h     |  5 +++--
>  kvm-all.c             | 18 +++++++++---------
>  target/i386/hax-all.c | 12 ++++++------
>  target/mips/kvm.c     |  4 ++--
>  4 files changed, 20 insertions(+), 19 deletions(-)
>
> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> index 89ddb686fb..2098eeae03 100644
> --- a/include/qom/cpu.h
> +++ b/include/qom/cpu.h
> @@ -369,7 +369,6 @@ struct CPUState {
>      vaddr mem_io_vaddr;
>  
>      int kvm_fd;
> -    bool kvm_vcpu_dirty;
>      struct KVMState *kvm_state;
>      struct kvm_run *kvm_run;
>  
> @@ -386,6 +385,9 @@ struct CPUState {
>      uint32_t can_do_io;
>      int32_t exception_index; /* used by m68k TCG */
>  
> +    /* shared by kvm, hax and hvf */
> +    bool vcpu_dirty;
> +
>      /* Used to keep track of an outstanding cpu throttle thread for migration
>       * autoconverge
>       */
> @@ -400,7 +402,6 @@ struct CPUState {
>          icount_decr_u16 u16;
>      } icount_decr;
>  
> -    bool hax_vcpu_dirty;
>      struct hax_vcpu_state *hax_vcpu;
>  
>      /* The pending_tlb_flush flag is set and cleared atomically to
> diff --git a/kvm-all.c b/kvm-all.c
> index ab8262f672..a5eaff270e 100644
> --- a/kvm-all.c
> +++ b/kvm-all.c
> @@ -318,7 +318,7 @@ int kvm_init_vcpu(CPUState *cpu)
>  
>      cpu->kvm_fd = ret;
>      cpu->kvm_state = s;
> -    cpu->kvm_vcpu_dirty = true;
> +    cpu->vcpu_dirty = true;
>  
>      mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
>      if (mmap_size < 0) {
> @@ -1864,15 +1864,15 @@ void kvm_flush_coalesced_mmio_buffer(void)
>  
>  static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
>  {
> -    if (!cpu->kvm_vcpu_dirty) {
> +    if (!cpu->vcpu_dirty) {
>          kvm_arch_get_registers(cpu);
> -        cpu->kvm_vcpu_dirty = true;
> +        cpu->vcpu_dirty = true;
>      }
>  }
>  
>  void kvm_cpu_synchronize_state(CPUState *cpu)
>  {
> -    if (!cpu->kvm_vcpu_dirty) {
> +    if (!cpu->vcpu_dirty) {
>          run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
>      }
>  }
> @@ -1880,7 +1880,7 @@ void kvm_cpu_synchronize_state(CPUState *cpu)
>  static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
>  {
>      kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
> -    cpu->kvm_vcpu_dirty = false;
> +    cpu->vcpu_dirty = false;
>  }
>  
>  void kvm_cpu_synchronize_post_reset(CPUState *cpu)
> @@ -1891,7 +1891,7 @@ void kvm_cpu_synchronize_post_reset(CPUState *cpu)
>  static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
>  {
>      kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
> -    cpu->kvm_vcpu_dirty = false;
> +    cpu->vcpu_dirty = false;
>  }
>  
>  void kvm_cpu_synchronize_post_init(CPUState *cpu)
> @@ -1901,7 +1901,7 @@ void kvm_cpu_synchronize_post_init(CPUState *cpu)
>  
>  static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
>  {
> -    cpu->kvm_vcpu_dirty = true;
> +    cpu->vcpu_dirty = true;
>  }
>  
>  void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
> @@ -1981,9 +1981,9 @@ int kvm_cpu_exec(CPUState *cpu)
>      do {
>          MemTxAttrs attrs;
>  
> -        if (cpu->kvm_vcpu_dirty) {
> +        if (cpu->vcpu_dirty) {
>              kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
> -            cpu->kvm_vcpu_dirty = false;
> +            cpu->vcpu_dirty = false;
>          }
>  
>          kvm_arch_pre_run(cpu, run);
> diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
> index 097db5cae1..3ada8b54d4 100644
> --- a/target/i386/hax-all.c
> +++ b/target/i386/hax-all.c
> @@ -232,7 +232,7 @@ int hax_init_vcpu(CPUState *cpu)
>      }
>  
>      cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index];
> -    cpu->hax_vcpu_dirty = true;
> +    cpu->vcpu_dirty = true;
>      qemu_register_reset(hax_reset_vcpu_state, (CPUArchState *) (cpu->env_ptr));
>  
>      return ret;
> @@ -598,12 +598,12 @@ static void do_hax_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
>      CPUArchState *env = cpu->env_ptr;
>  
>      hax_arch_get_registers(env);
> -    cpu->hax_vcpu_dirty = true;
> +    cpu->vcpu_dirty = true;
>  }
>  
>  void hax_cpu_synchronize_state(CPUState *cpu)
>  {
> -    if (!cpu->hax_vcpu_dirty) {
> +    if (!cpu->vcpu_dirty) {
>          run_on_cpu(cpu, do_hax_cpu_synchronize_state, RUN_ON_CPU_NULL);
>      }
>  }
> @@ -614,7 +614,7 @@ static void do_hax_cpu_synchronize_post_reset(CPUState *cpu,
>      CPUArchState *env = cpu->env_ptr;
>  
>      hax_vcpu_sync_state(env, 1);
> -    cpu->hax_vcpu_dirty = false;
> +    cpu->vcpu_dirty = false;
>  }
>  
>  void hax_cpu_synchronize_post_reset(CPUState *cpu)
> @@ -627,7 +627,7 @@ static void do_hax_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
>      CPUArchState *env = cpu->env_ptr;
>  
>      hax_vcpu_sync_state(env, 1);
> -    cpu->hax_vcpu_dirty = false;
> +    cpu->vcpu_dirty = false;
>  }
>  
>  void hax_cpu_synchronize_post_init(CPUState *cpu)
> @@ -637,7 +637,7 @@ void hax_cpu_synchronize_post_init(CPUState *cpu)
>  
>  static void do_hax_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
>  {
> -    cpu->hax_vcpu_dirty = true;
> +    cpu->vcpu_dirty = true;
>  }
>  
>  void hax_cpu_synchronize_pre_loadvm(CPUState *cpu)
> diff --git a/target/mips/kvm.c b/target/mips/kvm.c
> index 0982e874bb..3317905e71 100644
> --- a/target/mips/kvm.c
> +++ b/target/mips/kvm.c
> @@ -523,7 +523,7 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state)
>       * already saved and can be restored when it is synced back to KVM.
>       */
>      if (!running) {
> -        if (!cs->kvm_vcpu_dirty) {
> +        if (!cs->vcpu_dirty) {
>              ret = kvm_mips_save_count(cs);
>              if (ret < 0) {
>                  fprintf(stderr, "Failed saving count\n");
> @@ -539,7 +539,7 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state)
>              return;
>          }
>  
> -        if (!cs->kvm_vcpu_dirty) {
> +        if (!cs->vcpu_dirty) {
>              ret = kvm_mips_restore_count(cs);
>              if (ret < 0) {
>                  fprintf(stderr, "Failed restoring count\n");
>

Thanks, I queued this patch for 2.9.

Paolo

Reply | Threaded
Open this post in threaded view
|

Re: [PATCH] vcpu_dirty: share the same field in CPUState for all accelerators

Eric Blake
On 06/19/2017 11:35 AM, Paolo Bonzini wrote:
>
>
> On 18/06/2017 21:11, Sergio Andres Gomez Del Real wrote:
>> This patch simply replaces the separate boolean field in CPUState that
>> kvm, hax (and upcoming hvf) have for keeping track of vcpu dirtiness
>> with a single shared field.
>>
>> Signed-off-by: Sergio Andres Gomez Del Real <[hidden email]>
>> ---

>>
>
> Thanks, I queued this patch for 2.9.

As in qemu-stable 2.9.1, or did you mean 2.10 mainline?

--
Eric Blake, Principal Software Engineer
Red Hat, Inc.           +1-919-301-3266
Virtualization:  qemu.org | libvirt.org


signature.asc (617 bytes) Download Attachment
Reply | Threaded
Open this post in threaded view
|

Re: [PATCH] vcpu_dirty: share the same field in CPUState for all accelerators

Paolo Bonzini-5


On 27/06/2017 20:36, Eric Blake wrote:

> On 06/19/2017 11:35 AM, Paolo Bonzini wrote:
>>
>>
>> On 18/06/2017 21:11, Sergio Andres Gomez Del Real wrote:
>>> This patch simply replaces the separate boolean field in CPUState that
>>> kvm, hax (and upcoming hvf) have for keeping track of vcpu dirtiness
>>> with a single shared field.
>>>
>>> Signed-off-by: Sergio Andres Gomez Del Real <[hidden email]>
>>> ---
>
>>>
>>
>> Thanks, I queued this patch for 2.9.
>
> As in qemu-stable 2.9.1, or did you mean 2.10 mainline?

2.10 of course.

Paolo