From: Ackerley Tng <ackerleytng@google.com>
The migration_in_progress flag will also be needed for migration of non-SEV VMs, so move it from struct kvm_sev_info into struct kvm.
Co-developed-by: Sagi Shahar <sagis@google.com>
Signed-off-by: Sagi Shahar <sagis@google.com>
Co-developed-by: Vishal Annapurve <vannapurve@google.com>
Signed-off-by: Vishal Annapurve <vannapurve@google.com>
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
Signed-off-by: Ryan Afranji <afranji@google.com>
---
 arch/x86/kvm/svm/sev.c   | 17 ++++++-----------
 arch/x86/kvm/svm/svm.h   |  1 -
 include/linux/kvm_host.h |  1 +
 3 files changed, 7 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0bc708ee2788..89c06cfcc200 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1838,8 +1838,6 @@ static bool is_cmd_allowed_from_mirror(u32 cmd_id)
 
 static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
-	struct kvm_sev_info *dst_sev = to_kvm_sev_info(dst_kvm);
-	struct kvm_sev_info *src_sev = to_kvm_sev_info(src_kvm);
 	int r = -EBUSY;
 
 	if (dst_kvm == src_kvm)
@@ -1849,10 +1847,10 @@ static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 	 * Bail if these VMs are already involved in a migration to avoid
 	 * deadlock between two VMs trying to migrate to/from each other.
 	 */
-	if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
+	if (atomic_cmpxchg_acquire(&dst_kvm->migration_in_progress, 0, 1))
 		return -EBUSY;
 
-	if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
+	if (atomic_cmpxchg_acquire(&src_kvm->migration_in_progress, 0, 1))
 		goto release_dst;
 
 	r = -EINTR;
@@ -1865,21 +1863,18 @@ static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 unlock_dst:
 	mutex_unlock(&dst_kvm->lock);
 release_src:
-	atomic_set_release(&src_sev->migration_in_progress, 0);
+	atomic_set_release(&src_kvm->migration_in_progress, 0);
 release_dst:
-	atomic_set_release(&dst_sev->migration_in_progress, 0);
+	atomic_set_release(&dst_kvm->migration_in_progress, 0);
 	return r;
 }
 
 static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
-	struct kvm_sev_info *dst_sev = to_kvm_sev_info(dst_kvm);
-	struct kvm_sev_info *src_sev = to_kvm_sev_info(src_kvm);
-
 	mutex_unlock(&dst_kvm->lock);
 	mutex_unlock(&src_kvm->lock);
-	atomic_set_release(&dst_sev->migration_in_progress, 0);
-	atomic_set_release(&src_sev->migration_in_progress, 0);
+	atomic_set_release(&dst_kvm->migration_in_progress, 0);
+	atomic_set_release(&src_kvm->migration_in_progress, 0);
 }
 
 /* vCPU mutex subclasses. */
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index d4490eaed55d..35df8be621c5 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -107,7 +107,6 @@ struct kvm_sev_info {
 	struct list_head mirror_vms; /* List of VMs mirroring */
 	struct list_head mirror_entry; /* Use as a list entry of mirrors */
 	struct misc_cg *misc_cg; /* For misc cgroup accounting */
-	atomic_t migration_in_progress;
 	void *snp_context;      /* SNP guest context page */
 	void *guest_req_buf;    /* Bounce buffer for SNP Guest Request input */
 	void *guest_resp_buf;   /* Bounce buffer for SNP Guest Request output */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1dedc421b3e3..0c1d637a6e7d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -862,6 +862,7 @@ struct kvm {
 	/* Protected by slots_locks (for writes) and RCU (for reads) */
 	struct xarray mem_attr_array;
 #endif
+	atomic_t migration_in_progress;
 	char stats_id[KVM_STATS_NAME_SIZE];
 };
 
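
Not part of the patch itself, just a sketch of the intended reuse: once the flag lives in struct kvm, a non-SEV migration path can take and drop the per-VM marker with the same cmpxchg-acquire / set-release pattern that sev_lock_two_vms() uses, without reaching into kvm_sev_info. The helper names below are hypothetical and only illustrate the idea.

#include <linux/kvm_host.h>

/* Hypothetical helper: mark @kvm as busy with a migration, then take its lock. */
static int kvm_lock_vm_for_migration(struct kvm *kvm)
{
	/* Refuse to start a second migration involving this VM. */
	if (atomic_cmpxchg_acquire(&kvm->migration_in_progress, 0, 1))
		return -EBUSY;

	if (mutex_lock_killable(&kvm->lock)) {
		atomic_set_release(&kvm->migration_in_progress, 0);
		return -EINTR;
	}

	return 0;
}

/* Hypothetical helper: drop the lock and clear the migration marker. */
static void kvm_unlock_vm_for_migration(struct kvm *kvm)
{
	mutex_unlock(&kvm->lock);
	atomic_set_release(&kvm->migration_in_progress, 0);
}

Pairing two VMs for migration would then amount to calling the lock helper on both sides (and bailing out if either is already mid-migration), mirroring what sev_lock_two_vms()/sev_unlock_two_vms() already do.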