computing-offload/generic_vdpa/qemu/Revert-vfio-pci-Implement-the-DMA-fault-handler.patch
From 013220b686022a2e4ddb6a3d50af467275d25070 Mon Sep 17 00:00:00 2001
From: Kunkun Jiang <jiangkunkun@huawei.com>
Date: Fri, 18 Nov 2022 15:22:37 +0800
Subject: [PATCH 18/36] Revert "vfio/pci: Implement the DMA fault handler"

This reverts commit d33cc7eccb68c6a1488804c94ff5c1197ee0fc6e.

Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
---
 hw/vfio/pci.c | 50 --------------------------------------------------
 hw/vfio/pci.h |  1 -
 2 files changed, 51 deletions(-)

diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index c54e62fe8f..76bc9d3506 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -2953,60 +2953,10 @@ static PCIPASIDOps vfio_pci_pasid_ops = {
 static void vfio_dma_fault_notifier_handler(void *opaque)
 {
     VFIOPCIExtIRQ *ext_irq = opaque;
-    VFIOPCIDevice *vdev = ext_irq->vdev;
-    PCIDevice *pdev = &vdev->pdev;
-    AddressSpace *as = pci_device_iommu_address_space(pdev);
-    IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(as->root);
-    struct vfio_region_dma_fault header;
-    struct iommu_fault *queue;
-    char *queue_buffer = NULL;
-    ssize_t bytes;
 
     if (!event_notifier_test_and_clear(&ext_irq->notifier)) {
         return;
     }
-
-    bytes = pread(vdev->vbasedev.fd, &header, sizeof(header),
-                  vdev->dma_fault_region.fd_offset);
-    if (bytes != sizeof(header)) {
-        error_report("%s unable to read the fault region header (0x%lx)",
-                     __func__, bytes);
-        return;
-    }
-
-    /* Normally the fault queue is mmapped */
-    queue = (struct iommu_fault *)vdev->dma_fault_region.mmaps[0].mmap;
-    if (!queue) {
-        size_t queue_size = header.nb_entries * header.entry_size;
-
-        error_report("%s: fault queue not mmapped: slower fault handling",
-                     vdev->vbasedev.name);
-
-        queue_buffer = g_malloc(queue_size);
-        bytes = pread(vdev->vbasedev.fd, queue_buffer, queue_size,
-                      vdev->dma_fault_region.fd_offset + header.offset);
-        if (bytes != queue_size) {
-            error_report("%s unable to read the fault queue (0x%lx)",
-                         __func__, bytes);
-            return;
-        }
-
-        queue = (struct iommu_fault *)queue_buffer;
-    }
-
-    while (vdev->fault_tail_index != header.head) {
-        memory_region_inject_faults(iommu_mr, 1,
-                                    &queue[vdev->fault_tail_index]);
-        vdev->fault_tail_index =
-            (vdev->fault_tail_index + 1) % header.nb_entries;
-    }
-    bytes = pwrite(vdev->vbasedev.fd, &vdev->fault_tail_index, 4,
-                   vdev->dma_fault_region.fd_offset);
-    if (bytes != 4) {
-        error_report("%s unable to write the fault region tail index (0x%lx)",
-                     __func__, bytes);
-    }
-    g_free(queue_buffer);
 }
 
 static int vfio_register_ext_irq_handler(VFIOPCIDevice *vdev,
diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h
index 03ac8919ef..eef91065f1 100644
--- a/hw/vfio/pci.h
+++ b/hw/vfio/pci.h
@@ -146,7 +146,6 @@ struct VFIOPCIDevice {
     EventNotifier req_notifier;
     VFIOPCIExtIRQ *ext_irqs;
     VFIORegion dma_fault_region;
-    uint32_t fault_tail_index;
    int (*resetfn)(struct VFIOPCIDevice *);
    uint32_t vendor_id;
    uint32_t device_id;
--
2.27.0