computing-offload/generic_vdpa/qemu/vhost-implement-vhost-vdpa-suspend-resume.patch
jiangdongxu 79c4324644 add generic_vdpa basecode
Change-Id: I2d302dda68298877c65c99147f5bf22186a59aac
2024-09-19 17:19:46 +08:00

86 lines
2.7 KiB
Diff

From 98c74a827b742807f979fc36bca99ba0db38d295 Mon Sep 17 00:00:00 2001
From: fangyi <eric.fangyi@huawei.com>
Date: Mon, 4 Dec 2023 15:22:20 +0800
Subject: [PATCH] vhost: implement vhost-vdpa suspend/resume
vhost-vdpa implements the vhost_dev_suspend interface,
which will be called during the shutdown phase of the
live migration source virtual machine to suspend the
device but not reset the device information.
vhost-vdpa implements the vhost_dev_resume interface.
If live migration fails, it is called during the
restart phase of the source virtual machine to
re-enable the device and restore its status (vring
state, memory listener, etc.) without a full reset.
Signed-off-by: libai <libai12@huawei.com>
Signed-off-by: jiangdongxu <jiangdongxu1@huawei.com>
Signed-off-by: fangyi <eric.fangyi@huawei.com>
---
hw/virtio/vhost-vdpa.c | 44 ++++++++++++++++++++++++++++++++++++++++--
1 file changed, 42 insertions(+), 2 deletions(-)
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 7663d78b43..7688dc0eba 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -1318,6 +1318,43 @@ static unsigned int vhost_vdpa_get_used_memslots(void)
return vhost_vdpa_used_memslots;
}
+static int vhost_vdpa_suspend_device(struct vhost_dev *dev)
+{
+ struct vhost_vdpa *v = dev->opaque;
+ int ret;
+
+ vhost_vdpa_svqs_stop(dev);
+ vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
+
+ if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
+ return 0;
+ }
+
+ ret = vhost_vdpa_call(dev, VHOST_VDPA_SUSPEND, NULL);
+ memory_listener_unregister(&v->listener);
+ return ret;
+}
+
+static int vhost_vdpa_resume_device(struct vhost_dev *dev)
+{
+ struct vhost_vdpa *v = dev->opaque;
+ bool ok;
+
+ vhost_vdpa_host_notifiers_init(dev);
+ ok = vhost_vdpa_svqs_start(dev);
+ if (unlikely(!ok)) {
+ return -1;
+ }
+ vhost_vdpa_set_vring_ready(dev);
+
+ if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
+ return 0;
+ }
+
+ memory_listener_register(&v->listener, &address_space_memory);
+ return vhost_vdpa_call(dev, VHOST_VDPA_RESUME, NULL);
+}
+
static int vhost_vdpa_log_sync(struct vhost_dev *dev)
{
struct vhost_vdpa *v = dev->opaque;
@@ -1364,6 +1401,9 @@ const VhostOps vdpa_ops = {
.vhost_force_iommu = vhost_vdpa_force_iommu,
.vhost_log_sync = vhost_vdpa_log_sync,
.vhost_set_config_call = vhost_vdpa_set_config_call,
- .vhost_set_used_memslots = vhost_vdpa_set_used_memslots,
- .vhost_get_used_memslots = vhost_vdpa_get_used_memslots,
+ .vhost_set_used_memslots = vhost_vdpa_set_used_memslots,
+ .vhost_get_used_memslots = vhost_vdpa_get_used_memslots,
+ .vhost_dev_suspend = vhost_vdpa_suspend_device,
+ .vhost_dev_resume = vhost_vdpa_resume_device,
+
};
--
2.27.0