computing-offload/generic_vdpa/qemu/vhost-introduce-bytemap-for-vhost-backend-logging.patch
From e2f1953ad26a61e59f1d45892c6937d7454e65b5 Mon Sep 17 00:00:00 2001
From: fangyi <eric.fangyi@huawei.com>
Date: Mon, 4 Dec 2023 15:09:26 +0800
Subject: [PATCH] vhost: introduce bytemap for vhost backend logging

A vhost backend may use a bytemap instead of a bitmap for dirty
logging. When computing the log_size of a vhost device, check whether
the device supports VHOST_BACKEND_F_BYTEMAPLOG; if it does, use the
bytemap for logging.

While at it, add a check for the log_resize function pointer and check
the return value of vhost_log_sync.
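
With the bitmap, each vhost_log_chunk_t (an unsigned long, 8 bytes on
a 64-bit host) covers VHOST_LOG_BITS = 64 pages, so one chunk tracks
VHOST_LOG_CHUNK = 0x1000 * 64 = 256 KiB of guest memory. With the
bytemap, one byte marks one page, so one chunk tracks only
VHOST_LOG_CHUNK_BYTES = 0x1000 * 8 = 32 KiB. A region ending at 4 GiB
thus needs 4 GiB / 256 KiB = 16384 bitmap chunks (a 128 KiB log) but
4 GiB / 32 KiB = 131072 bytemap chunks (a 1 MiB log), which is why
vhost_get_log_size() must pick the chunk size by backend capability.
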
Signed-off-by: jiangdongxu <jiangdongxu1@huawei.com>
Signed-off-by: fangyi <eric.fangyi@huawei.com>
---
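
For reference, a minimal standalone model of the bytemap walk performed
below by _set_dirty_bytemap_atomic() and
cpu_physical_memory_set_dirty_bytemap() (a sketch assuming a 64-bit host
and one dirty byte per page; the hypothetical mark_page_dirty() stands
in for cpu_physical_memory_set_dirty_range()):

    #include <stdint.h>
    #include <stdio.h>

    #define BYTES_PER_LONG (sizeof(unsigned long))

    /* Toy stand-in for cpu_physical_memory_set_dirty_range(). */
    static void mark_page_dirty(unsigned long pfn)
    {
        printf("page %lu is dirty\n", pfn);
    }

    /* Walk one long's worth of per-page dirty bytes; report and clear. */
    static int64_t set_dirty_bytemap_atomic(unsigned long *bytemap,
                                            unsigned long cur_pfn)
    {
        char *byte_of_long = (char *)bytemap;
        int64_t dirty_num = 0;
        size_t i;

        for (i = 0; i < BYTES_PER_LONG; i++) {
            if (byte_of_long[i]) {
                mark_page_dirty(cur_pfn + i);
                byte_of_long[i] = 0; /* per-byte store, no atomic_xchg */
                dirty_num++;
            }
        }
        return dirty_num;
    }

    int main(void)
    {
        unsigned long bytemap[2] = { 0, 0 };
        int64_t n = 0;
        size_t i;

        ((char *)bytemap)[1] = 1; /* guest page 1 dirtied */
        ((char *)bytemap)[9] = 1; /* guest page 9 dirtied */

        /* Whole-long test lets clean stretches be skipped cheaply. */
        for (i = 0; i < 2; i++) {
            if (bytemap[i]) {
                n += set_dirty_bytemap_atomic(&bytemap[i],
                                              i * BYTES_PER_LONG);
            }
        }
        printf("%lld dirty pages total\n", (long long)n);
        return 0;
    }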
hw/virtio/vhost.c | 142 ++++++++++++++++++++++++++++++++++++--
include/hw/virtio/vhost.h | 1 +
2 files changed, 137 insertions(+), 6 deletions(-)
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 7930b37499..d2b9278474 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -19,9 +19,11 @@
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
+#include "cpu.h"
#include "qemu/memfd.h"
#include "qemu/log.h"
#include "standard-headers/linux/vhost_types.h"
+#include "exec/ram_addr.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
@@ -30,6 +32,7 @@
#include "sysemu/dma.h"
#include "sysemu/tcg.h"
#include "trace.h"
+#include "qapi/qapi-commands-migration.h"
/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1
@@ -45,6 +48,11 @@
do { } while (0)
#endif
+static inline bool vhost_bytemap_log_support(struct vhost_dev *dev)
+{
+ return (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_BYTEMAPLOG));
+}
+
static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;
@@ -213,12 +221,93 @@ static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
return 0;
}
+#define BYTES_PER_LONG (sizeof(unsigned long))
+#define BYTE_WORD(nr) ((nr) / BYTES_PER_LONG)
+#define BYTES_TO_LONGS(nr) DIV_ROUND_UP(nr, BYTES_PER_LONG)
+
+static inline int64_t _set_dirty_bytemap_atomic(unsigned long *bytemap, unsigned long cur_pfn)
+{
+ char *byte_of_long = (char *)bytemap;
+ int i;
+ int64_t dirty_num = 0;
+
+ for (i = 0; i < BYTES_PER_LONG; i++) {
+ if (byte_of_long[i]) {
+ cpu_physical_memory_set_dirty_range((cur_pfn + i) << TARGET_PAGE_BITS,
+ TARGET_PAGE_SIZE,
+ 1 << DIRTY_MEMORY_MIGRATION);
+ /* Per byte ops, no need to atomic_xchg */
+ byte_of_long[i] = 0;
+ dirty_num++;
+ }
+ }
+
+ return dirty_num;
+}
+
+static inline int64_t cpu_physical_memory_set_dirty_bytemap(unsigned long *bytemap,
+ ram_addr_t start,
+ ram_addr_t pages)
+{
+ unsigned long i;
+ unsigned long len = BYTES_TO_LONGS(pages);
+ unsigned long pfn = (start >> TARGET_PAGE_BITS) /
+ BYTES_PER_LONG * BYTES_PER_LONG;
+ int64_t dirty_mig_bits = 0;
+
+ for (i = 0; i < len; i++) {
+ if (bytemap[i]) {
+ dirty_mig_bits += _set_dirty_bytemap_atomic(&bytemap[i],
+ pfn + BYTES_PER_LONG * i);
+ }
+ }
+
+ return dirty_mig_bits;
+}
+
+static int vhost_sync_dirty_bytemap(struct vhost_dev *dev,
+ MemoryRegionSection *section)
+{
+ struct vhost_log *log = dev->log;
+
+ ram_addr_t start = section->offset_within_region +
+ memory_region_get_ram_addr(section->mr);
+ ram_addr_t pages = int128_get64(section->size) >> TARGET_PAGE_BITS;
+
+ hwaddr idx = BYTE_WORD(
+ section->offset_within_address_space >> TARGET_PAGE_BITS);
+
+ return cpu_physical_memory_set_dirty_bytemap((unsigned long *)log->log + idx,
+ start, pages);
+}
+
static void vhost_log_sync(MemoryListener *listener,
MemoryRegionSection *section)
{
struct vhost_dev *dev = container_of(listener, struct vhost_dev,
memory_listener);
- vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
+ MigrationState *ms = migrate_get_current();
+
+ if (!dev->log_enabled || !dev->started) {
+ return;
+ }
+
+ if (dev->vhost_ops->vhost_log_sync) {
+ int r = dev->vhost_ops->vhost_log_sync(dev);
+ if (r < 0) {
+ error_report("Failed to sync dirty log: %d", r);
+ if (migration_is_running(ms->state)) {
+ qmp_migrate_cancel(NULL);
+ }
+ return;
+ }
+ }
+
+ if (vhost_bytemap_log_support(dev)) {
+ vhost_sync_dirty_bytemap(dev, section);
+ } else {
+ vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
+ }
}
static void vhost_log_sync_range(struct vhost_dev *dev,
@@ -228,7 +317,11 @@ static void vhost_log_sync_range(struct vhost_dev *dev,
/* FIXME: this is N^2 in number of sections */
for (i = 0; i < dev->n_mem_sections; ++i) {
MemoryRegionSection *section = &dev->mem_sections[i];
- vhost_sync_dirty_bitmap(dev, section, first, last);
+ if (vhost_bytemap_log_support(dev)) {
+ vhost_sync_dirty_bytemap(dev, section);
+ } else {
+ vhost_sync_dirty_bitmap(dev, section, first, last);
+ }
}
}
@@ -236,11 +329,19 @@ static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
uint64_t log_size = 0;
int i;
+ uint64_t vhost_log_chunk_size;
+
+ if (vhost_bytemap_log_support(dev)) {
+ vhost_log_chunk_size = VHOST_LOG_CHUNK_BYTES;
+ } else {
+ vhost_log_chunk_size = VHOST_LOG_CHUNK;
+ }
+
for (i = 0; i < dev->mem->nregions; ++i) {
struct vhost_memory_region *reg = dev->mem->regions + i;
uint64_t last = range_get_last(reg->guest_phys_addr,
reg->memory_size);
- log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
+ log_size = MAX(log_size, last / vhost_log_chunk_size + 1);
}
return log_size;
}
@@ -358,12 +459,19 @@ static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
dev->vhost_ops->vhost_requires_shm_log(dev);
}
-static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
+static inline int vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
- uint64_t log_base = (uintptr_t)log->log;
+ uint64_t log_base;
int r;
+ if (!log) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ log_base = (uintptr_t)log->log;
+
/* inform backend of log switching, this must be done before
releasing the current log, to ensure no logging is lost */
r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
@@ -371,9 +479,19 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
}
+ if (dev->vhost_ops->vhost_set_log_size) {
+ r = dev->vhost_ops->vhost_set_log_size(dev, size, log);
+ if (r < 0) {
+ VHOST_OPS_DEBUG(r, "vhost_set_log_size failed");
+ }
+ }
+
vhost_log_put(dev, true);
dev->log = log;
dev->log_size = size;
+
+out:
+ return r;
}
static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
@@ -990,7 +1108,11 @@ static int vhost_migration_log(MemoryListener *listener, bool enable)
}
vhost_log_put(dev, false);
} else {
- vhost_dev_log_resize(dev, vhost_get_log_size(dev));
+ r = vhost_dev_log_resize(dev, vhost_get_log_size(dev));
+ if (r < 0) {
+ return r;
+ }
+
r = vhost_dev_set_log(dev, true);
if (r < 0) {
goto check_dev_state;
@@ -1967,6 +2089,14 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
goto fail_log;
}
+
+ if (hdev->vhost_ops->vhost_set_log_size) {
+ r = hdev->vhost_ops->vhost_set_log_size(hdev, hdev->log_size, hdev->log);
+ if (r < 0) {
+ VHOST_OPS_DEBUG(r, "vhost_set_log_size failed");
+ goto fail_log;
+ }
+ }
}
if (vrings) {
r = vhost_dev_set_vring_enable(hdev, true);
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 420f93e5cd..0491fe1ed7 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -40,6 +40,7 @@ typedef unsigned long vhost_log_chunk_t;
#define VHOST_LOG_PAGE 0x1000
#define VHOST_LOG_BITS (8 * sizeof(vhost_log_chunk_t))
#define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS)
+#define VHOST_LOG_CHUNK_BYTES (VHOST_LOG_PAGE * sizeof(vhost_log_chunk_t))
#define VHOST_INVALID_FEATURE_BIT (0xff)
#define VHOST_QUEUE_NUM_CONFIG_INR 0
--
2.27.0