computing-offload/generic_vdpa/qemu/vfio.h-and-iommu.h-header-update-against-5.10.patch
From 36b65d7312a343cb636e6963b8262dce9420ebc6 Mon Sep 17 00:00:00 2001
From: Kunkun Jiang <jiangkunkun@huawei.com>
Date: Fri, 30 Jul 2021 09:15:31 +0800
Subject: [PATCH] vfio.h and iommu.h header update against 5.10
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
---
linux-headers/linux/iommu.h | 395 ++++++++++++++++++++++++++++++++++++
linux-headers/linux/vfio.h | 220 +++++++++++++++++++-
2 files changed, 613 insertions(+), 2 deletions(-)
create mode 100644 linux-headers/linux/iommu.h
diff --git a/linux-headers/linux/iommu.h b/linux-headers/linux/iommu.h
new file mode 100644
index 0000000000..773b7dc2d6
--- /dev/null
+++ b/linux-headers/linux/iommu.h
@@ -0,0 +1,395 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * IOMMU user API definitions
+ */
+
+#ifndef IOMMU_H
+#define IOMMU_H
+
+#include <linux/types.h>
+
+#define IOMMU_FAULT_PERM_READ (1 << 0) /* read */
+#define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */
+#define IOMMU_FAULT_PERM_EXEC (1 << 2) /* exec */
+#define IOMMU_FAULT_PERM_PRIV (1 << 3) /* privileged */
+
+/* Generic fault types, can be expanded for IRQ remapping faults */
+enum iommu_fault_type {
+ IOMMU_FAULT_DMA_UNRECOV = 1, /* unrecoverable fault */
+ IOMMU_FAULT_PAGE_REQ, /* page request fault */
+};
+
+enum iommu_fault_reason {
+ IOMMU_FAULT_REASON_UNKNOWN = 0,
+
+ /* Could not access the PASID table (fetch caused external abort) */
+ IOMMU_FAULT_REASON_PASID_FETCH,
+
+ /* PASID entry is invalid or has configuration errors */
+ IOMMU_FAULT_REASON_BAD_PASID_ENTRY,
+
+ /*
+ * PASID is out of range (e.g. exceeds the maximum PASID
+ * supported by the IOMMU) or disabled.
+ */
+ IOMMU_FAULT_REASON_PASID_INVALID,
+
+ /*
+ * An external abort occurred fetching (or updating) a translation
+ * table descriptor
+ */
+ IOMMU_FAULT_REASON_WALK_EABT,
+
+ /*
+ * Could not access the page table entry (Bad address),
+ * actual translation fault
+ */
+ IOMMU_FAULT_REASON_PTE_FETCH,
+
+ /* Protection flag check failed */
+ IOMMU_FAULT_REASON_PERMISSION,
+
+ /* access flag check failed */
+ IOMMU_FAULT_REASON_ACCESS,
+
+ /* Output address of a translation stage caused Address Size fault */
+ IOMMU_FAULT_REASON_OOR_ADDRESS,
+};
+
+/**
+ * struct iommu_fault_unrecoverable - Unrecoverable fault data
+ * @reason: reason of the fault, from &enum iommu_fault_reason
+ * @flags: parameters of this fault (IOMMU_FAULT_UNRECOV_* values)
+ * @pasid: Process Address Space ID
+ * @perm: permissions requested by the incoming transaction
+ * (IOMMU_FAULT_PERM_* values)
+ * @addr: offending page address
+ * @fetch_addr: address that caused a fetch abort, if any
+ */
+struct iommu_fault_unrecoverable {
+ __u32 reason;
+#define IOMMU_FAULT_UNRECOV_PASID_VALID (1 << 0)
+#define IOMMU_FAULT_UNRECOV_ADDR_VALID (1 << 1)
+#define IOMMU_FAULT_UNRECOV_FETCH_ADDR_VALID (1 << 2)
+ __u32 flags;
+ __u32 pasid;
+ __u32 perm;
+ __u64 addr;
+ __u64 fetch_addr;
+};
+
+/**
+ * struct iommu_fault_page_request - Page Request data
+ * @flags: encodes whether the corresponding fields are valid and whether this
+ * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
+ * When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
+ * must have the same PASID value as the page request. When it is clear,
+ * the page response should not have a PASID.
+ * @pasid: Process Address Space ID
+ * @grpid: Page Request Group Index
+ * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
+ * @addr: page address
+ * @private_data: device-specific private information
+ */
+struct iommu_fault_page_request {
+#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0)
+#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1)
+#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2)
+#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 3)
+ __u32 flags;
+ __u32 pasid;
+ __u32 grpid;
+ __u32 perm;
+ __u64 addr;
+ __u64 private_data[2];
+};
+
+/**
+ * struct iommu_fault - Generic fault data
+ * @type: fault type from &enum iommu_fault_type
+ * @padding: reserved for future use (should be zero)
+ * @event: fault event, when @type is %IOMMU_FAULT_DMA_UNRECOV
+ * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
+ * @padding2: sets the fault size to allow for future extensions
+ */
+struct iommu_fault {
+ __u32 type;
+ __u32 padding;
+ union {
+ struct iommu_fault_unrecoverable event;
+ struct iommu_fault_page_request prm;
+ __u8 padding2[56];
+ };
+};
+
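As a quick orientation, here is a minimal userspace sketch of decoding one such record once it has been copied out of a fault queue. The helper name and the printf reporting are illustrative assumptions, not part of this header:

#include <stdio.h>
#include <linux/iommu.h>

/* Sketch: decode one fault record (assumed already copied from the
 * kernel). @type selects which union member is valid. */
static void decode_fault(const struct iommu_fault *f)
{
        if (f->type == IOMMU_FAULT_PAGE_REQ)
                printf("page request: pasid=%u grpid=%u addr=0x%llx\n",
                       f->prm.pasid, f->prm.grpid,
                       (unsigned long long)f->prm.addr);
        else if (f->type == IOMMU_FAULT_DMA_UNRECOV)
                printf("unrecoverable: reason=%u addr=0x%llx\n",
                       f->event.reason,
                       (unsigned long long)f->event.addr);
}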
+/**
+ * enum iommu_page_response_code - Return status of fault handlers
+ * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
+ * populated, retry the access. This is "Success" in PCI PRI.
+ * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
+ * this device if possible. This is "Response Failure" in PCI PRI.
+ * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
+ * access. This is "Invalid Request" in PCI PRI.
+ */
+enum iommu_page_response_code {
+ IOMMU_PAGE_RESP_SUCCESS = 0,
+ IOMMU_PAGE_RESP_INVALID,
+ IOMMU_PAGE_RESP_FAILURE,
+};
+
+/**
+ * struct iommu_page_response - Generic page response information
+ * @argsz: User filled size of this data
+ * @version: API version of this structure
+ * @flags: encodes whether the corresponding fields are valid
+ * (IOMMU_FAULT_PAGE_RESPONSE_* values)
+ * @pasid: Process Address Space ID
+ * @grpid: Page Request Group Index
+ * @code: response code from &enum iommu_page_response_code
+ */
+struct iommu_page_response {
+ __u32 argsz;
+#define IOMMU_PAGE_RESP_VERSION_1 1
+ __u32 version;
+#define IOMMU_PAGE_RESP_PASID_VALID (1 << 0)
+ __u32 flags;
+ __u32 pasid;
+ __u32 grpid;
+ __u32 code;
+};
+
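A minimal sketch of filling a response to a page request that carried a valid PASID; the helper name and field values are illustrative assumptions:

/* Sketch: build a successful response to a page request that had
 * IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID set. */
static struct iommu_page_response make_response(__u32 pasid, __u32 grpid)
{
        struct iommu_page_response resp = {
                .argsz   = sizeof(resp),
                .version = IOMMU_PAGE_RESP_VERSION_1,
                .flags   = IOMMU_PAGE_RESP_PASID_VALID,
                .pasid   = pasid,
                .grpid   = grpid,
                .code    = IOMMU_PAGE_RESP_SUCCESS,
        };
        return resp;
}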
+/* defines the granularity of the invalidation */
+enum iommu_inv_granularity {
+ IOMMU_INV_GRANU_DOMAIN, /* domain-selective invalidation */
+ IOMMU_INV_GRANU_PASID, /* PASID-selective invalidation */
+ IOMMU_INV_GRANU_ADDR, /* page-selective invalidation */
+ IOMMU_INV_GRANU_NR, /* number of invalidation granularities */
+};
+
+/**
+ * struct iommu_inv_addr_info - Address Selective Invalidation Structure
+ *
+ * @flags: indicates the granularity of the address-selective invalidation
+ * - If the PASID bit is set, the @pasid field is populated and the invalidation
+ * relates to cache entries tagged with this PASID and matching the address
+ * range.
+ * - If ARCHID bit is set, @archid is populated and the invalidation relates
+ * to cache entries tagged with this architecture specific ID and matching
+ * the address range.
+ * - Both PASID and ARCHID can be set as they may tag different caches.
+ * - If neither PASID nor ARCHID is set, global addr invalidation applies.
+ * - The LEAF flag indicates whether only the leaf PTE caching needs to be
+ * invalidated and other paging structure caches can be preserved.
+ * @pasid: process address space ID
+ * @archid: architecture-specific ID
+ * @addr: first stage/level input address
+ * @granule_size: page/block size of the mapping in bytes
+ * @nb_granules: number of contiguous granules to be invalidated
+ */
+struct iommu_inv_addr_info {
+#define IOMMU_INV_ADDR_FLAGS_PASID (1 << 0)
+#define IOMMU_INV_ADDR_FLAGS_ARCHID (1 << 1)
+#define IOMMU_INV_ADDR_FLAGS_LEAF (1 << 2)
+ __u32 flags;
+ __u32 archid;
+ __u64 pasid;
+ __u64 addr;
+ __u64 granule_size;
+ __u64 nb_granules;
+};
+
+/**
+ * struct iommu_inv_pasid_info - PASID Selective Invalidation Structure
+ *
+ * @flags: indicates the granularity of the PASID-selective invalidation
+ * - If the PASID bit is set, the @pasid field is populated and the invalidation
+ * relates to cache entries tagged with this PASID and matching the address
+ * range.
+ * - If the ARCHID bit is set, the @archid is populated and the invalidation
+ * relates to cache entries tagged with this architecture specific ID and
+ * matching the address range.
+ * - Both PASID and ARCHID can be set as they may tag different caches.
+ * - At least one of PASID or ARCHID must be set.
+ * @pasid: process address space ID
+ * @archid: architecture-specific ID
+ */
+struct iommu_inv_pasid_info {
+#define IOMMU_INV_PASID_FLAGS_PASID (1 << 0)
+#define IOMMU_INV_PASID_FLAGS_ARCHID (1 << 1)
+ __u32 flags;
+ __u32 archid;
+ __u64 pasid;
+};
+
+/**
+ * struct iommu_cache_invalidate_info - First level/stage invalidation
+ * information
+ * @argsz: User filled size of this data
+ * @version: API version of this structure
+ * @cache: bitfield that allows to select which caches to invalidate
+ * @granularity: defines the lowest granularity used for the invalidation:
+ * domain > PASID > addr
+ * @padding: reserved for future use (should be zero)
+ * @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID
+ * @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR
+ *
+ * Not all the combinations of cache/granularity are valid:
+ *
+ * +--------------+---------------+---------------+---------------+
+ * | type / | DEV_IOTLB | IOTLB | PASID |
+ * | granularity | | | cache |
+ * +==============+===============+===============+===============+
+ * | DOMAIN | N/A | Y | Y |
+ * +--------------+---------------+---------------+---------------+
+ * | PASID | Y | Y | Y |
+ * +--------------+---------------+---------------+---------------+
+ * | ADDR | Y | Y | N/A |
+ * +--------------+---------------+---------------+---------------+
+ *
+ * Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than
+ * @version and @cache.
+ *
+ * If multiple cache types are invalidated simultaneously, they all
+ * must support the used granularity.
+ */
+struct iommu_cache_invalidate_info {
+ __u32 argsz;
+#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
+ __u32 version;
+/* IOMMU paging structure cache */
+#define IOMMU_CACHE_INV_TYPE_IOTLB (1 << 0) /* IOMMU IOTLB */
+#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB (1 << 1) /* Device IOTLB */
+#define IOMMU_CACHE_INV_TYPE_PASID (1 << 2) /* PASID cache */
+#define IOMMU_CACHE_INV_TYPE_NR (3)
+ __u8 cache;
+ __u8 granularity;
+ __u8 padding[6];
+ union {
+ struct iommu_inv_pasid_info pasid_info;
+ struct iommu_inv_addr_info addr_info;
+ } granu;
+};
+
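Per the table above, the IOTLB supports ADDR granularity. A sketch of building such a request follows; the 4 KiB granule size and the 16-entry range are assumptions for illustration:

#include <string.h>

/* Sketch: PASID-tagged, address-selective IOTLB invalidation of
 * 16 contiguous 4 KiB granules starting at @iova. */
static void build_inv(struct iommu_cache_invalidate_info *inv,
                      __u64 pasid, __u64 iova)
{
        memset(inv, 0, sizeof(*inv));
        inv->argsz       = sizeof(*inv);
        inv->version     = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1;
        inv->cache       = IOMMU_CACHE_INV_TYPE_IOTLB;
        inv->granularity = IOMMU_INV_GRANU_ADDR;
        inv->granu.addr_info.flags        = IOMMU_INV_ADDR_FLAGS_PASID;
        inv->granu.addr_info.pasid        = pasid;
        inv->granu.addr_info.addr         = iova;
        inv->granu.addr_info.granule_size = 4096;
        inv->granu.addr_info.nb_granules  = 16;
}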
+/**
+ * struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest
+ * SVA binding.
+ *
+ * @flags: VT-d PASID table entry attributes
+ * @pat: Page attribute table data to compute effective memory type
+ * @emt: Extended memory type
+ *
+ * Only guest vIOMMU selectable and effective options are passed down to
+ * the host IOMMU.
+ */
+struct iommu_gpasid_bind_data_vtd {
+#define IOMMU_SVA_VTD_GPASID_SRE (1 << 0) /* supervisor request */
+#define IOMMU_SVA_VTD_GPASID_EAFE (1 << 1) /* extended access enable */
+#define IOMMU_SVA_VTD_GPASID_PCD (1 << 2) /* page-level cache disable */
+#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */
+#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */
+#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */
+#define IOMMU_SVA_VTD_GPASID_LAST (1 << 6)
+ __u64 flags;
+ __u32 pat;
+ __u32 emt;
+};
+
+#define IOMMU_SVA_VTD_GPASID_MTS_MASK (IOMMU_SVA_VTD_GPASID_CD | \
+ IOMMU_SVA_VTD_GPASID_EMTE | \
+ IOMMU_SVA_VTD_GPASID_PCD | \
+ IOMMU_SVA_VTD_GPASID_PWT)
+
+/**
+ * struct iommu_gpasid_bind_data - Information about device and guest PASID binding
+ * @argsz: User filled size of this data
+ * @version: Version of this data structure
+ * @format: PASID table entry format
+ * @flags: Additional information on guest bind request
+ * @gpgd: Guest page directory base of the guest mm to bind
+ * @hpasid: Process address space ID used for the guest mm in host IOMMU
+ * @gpasid: Process address space ID used for the guest mm in guest IOMMU
+ * @addr_width: Guest virtual address width
+ * @padding: Reserved for future use (should be zero)
+ * @vtd: Intel VT-d specific data
+ *
+ * Guest to host PASID mapping can be an identity or non-identity, where guest
+ * has its own PASID space. For non-identity mapping, guest to host PASID lookup
+ * is needed when VM programs guest PASID into an assigned device. VMM may
+ * trap such PASID programming then request host IOMMU driver to convert guest
+ * PASID to host PASID based on this bind data.
+ */
+struct iommu_gpasid_bind_data {
+ __u32 argsz;
+#define IOMMU_GPASID_BIND_VERSION_1 1
+ __u32 version;
+#define IOMMU_PASID_FORMAT_INTEL_VTD 1
+#define IOMMU_PASID_FORMAT_LAST 2
+ __u32 format;
+ __u32 addr_width;
+#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */
+ __u64 flags;
+ __u64 gpgd;
+ __u64 hpasid;
+ __u64 gpasid;
+ __u8 padding[8];
+ /* Vendor specific data */
+ union {
+ struct iommu_gpasid_bind_data_vtd vtd;
+ } vendor;
+};
+
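A sketch of the non-identity bind described above for a VT-d guest; the guest PGD address, both PASID values and the address width are assumptions for illustration:

/* Sketch: bind guest PASID 1 to host PASID 5. */
static const struct iommu_gpasid_bind_data gbind = {
        .argsz      = sizeof(struct iommu_gpasid_bind_data),
        .version    = IOMMU_GPASID_BIND_VERSION_1,
        .format     = IOMMU_PASID_FORMAT_INTEL_VTD,
        .addr_width = 48,                   /* guest VA width, assumed */
        .flags      = IOMMU_SVA_GPASID_VAL, /* @gpasid is valid */
        .gpgd       = 0x1000,               /* guest PGD GPA, assumed */
        .hpasid     = 5,                    /* host-allocated PASID */
        .gpasid     = 1,                    /* PASID chosen by the guest */
};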
+/**
+ * struct iommu_pasid_smmuv3 - ARM SMMUv3 Stream Table Entry stage 1 related
+ * information
+ * @version: API version of this structure
+ * @s1fmt: STE s1fmt (format of the CD table: single CD, linear table
+ * or 2-level table)
+ * @s1dss: STE s1dss (specifies the behavior when @pasid_bits != 0
+ * and no PASID is passed along with the incoming transaction)
+ * @padding: reserved for future use (should be zero)
+ *
+ * The PASID table is referred to as the Context Descriptor (CD) table on ARM
+ * SMMUv3. Please refer to the ARM SMMU 3.x spec (ARM IHI 0070A) for full
+ * details.
+ */
+struct iommu_pasid_smmuv3 {
+#define PASID_TABLE_SMMUV3_CFG_VERSION_1 1
+ __u32 version;
+ __u8 s1fmt;
+ __u8 s1dss;
+ __u8 padding[2];
+};
+
+/**
+ * struct iommu_pasid_table_config - PASID table data used to bind guest PASID
+ * table to the host IOMMU
+ * @argsz: User filled size of this data
+ * @version: API version to prepare for future extensions
+ * @base_ptr: guest physical address of the PASID table
+ * @format: format of the PASID table
+ * @pasid_bits: number of PASID bits used in the PASID table
+ * @config: indicates whether the guest translation stage must
+ * be translated, bypassed or aborted.
+ * @padding: reserved for future use (should be zero)
+ * @vendor_data.smmuv3: table information when @format is
+ * %IOMMU_PASID_FORMAT_SMMUV3
+ */
+struct iommu_pasid_table_config {
+ __u32 argsz;
+#define PASID_TABLE_CFG_VERSION_1 1
+ __u32 version;
+ __u64 base_ptr;
+#define IOMMU_PASID_FORMAT_SMMUV3 1
+ __u32 format;
+ __u8 pasid_bits;
+#define IOMMU_PASID_CONFIG_TRANSLATE 1
+#define IOMMU_PASID_CONFIG_BYPASS 2
+#define IOMMU_PASID_CONFIG_ABORT 3
+ __u8 config;
+ __u8 padding[2];
+ union {
+ struct iommu_pasid_smmuv3 smmuv3;
+ } vendor_data;
+};
+
+#endif /* IOMMU_H */
diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h
index f4ff038e8c..cf8e208fac 100644
--- a/linux-headers/linux/vfio.h
+++ b/linux-headers/linux/vfio.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/ioctl.h>
+#include <linux/iommu.h>
#define VFIO_API_VERSION 0
@@ -334,6 +335,7 @@ struct vfio_region_info_cap_type {
#define VFIO_REGION_TYPE_GFX (1)
#define VFIO_REGION_TYPE_CCW (2)
#define VFIO_REGION_TYPE_MIGRATION (3)
+#define VFIO_REGION_TYPE_NESTED (4)
/* sub-types for VFIO_REGION_TYPE_PCI_* */
@@ -362,6 +364,10 @@ struct vfio_region_info_cap_type {
/* sub-types for VFIO_REGION_TYPE_GFX */
#define VFIO_REGION_SUBTYPE_GFX_EDID (1)
+/* sub-types for VFIO_REGION_TYPE_NESTED */
+#define VFIO_REGION_SUBTYPE_NESTED_DMA_FAULT (1)
+#define VFIO_REGION_SUBTYPE_NESTED_DMA_FAULT_RESPONSE (2)
+
/**
* struct vfio_region_gfx_edid - EDID region layout.
*
@@ -721,11 +727,30 @@ struct vfio_irq_info {
#define VFIO_IRQ_INFO_MASKABLE (1 << 1)
#define VFIO_IRQ_INFO_AUTOMASKED (1 << 2)
#define VFIO_IRQ_INFO_NORESIZE (1 << 3)
+#define VFIO_IRQ_INFO_FLAG_CAPS (1 << 4) /* Info supports caps */
__u32 index; /* IRQ index */
__u32 count; /* Number of IRQs within this index */
+ __u32 cap_offset; /* Offset within info struct of first cap */
};
#define VFIO_DEVICE_GET_IRQ_INFO _IO(VFIO_TYPE, VFIO_BASE + 9)
+/*
+ * The irq type capability allows IRQs unique to a specific device or
+ * class of devices to be exposed.
+ *
+ * The structures below define version 1 of this capability.
+ */
+#define VFIO_IRQ_INFO_CAP_TYPE 3
+
+struct vfio_irq_info_cap_type {
+ struct vfio_info_cap_header header;
+ __u32 type; /* global per bus driver */
+ __u32 subtype; /* type specific */
+};
+
+#define VFIO_IRQ_TYPE_NESTED (1)
+#define VFIO_IRQ_SUBTYPE_DMA_FAULT (1)
+
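A sketch of walking the capability chain to recognise the nested DMA fault IRQ; @info is assumed to have been filled by VFIO_DEVICE_GET_IRQ_INFO with VFIO_IRQ_INFO_FLAG_CAPS set and with room for the chain:

/* Sketch: return 1 if this IRQ index is the nested DMA fault IRQ. */
static int is_dma_fault_irq(const struct vfio_irq_info *info)
{
        __u32 off = info->cap_offset;

        while (off) {
                const struct vfio_info_cap_header *hdr =
                        (const void *)((const char *)info + off);

                if (hdr->id == VFIO_IRQ_INFO_CAP_TYPE) {
                        const struct vfio_irq_info_cap_type *t =
                                (const void *)hdr;
                        return t->type == VFIO_IRQ_TYPE_NESTED &&
                               t->subtype == VFIO_IRQ_SUBTYPE_DMA_FAULT;
                }
                off = hdr->next;
        }
        return 0;
}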
/**
* VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
*
@@ -827,7 +852,8 @@ enum {
VFIO_PCI_MSIX_IRQ_INDEX,
VFIO_PCI_ERR_IRQ_INDEX,
VFIO_PCI_REQ_IRQ_INDEX,
- VFIO_PCI_NUM_IRQS
+ VFIO_PCI_NUM_IRQS = 5 /* Fixed user ABI, IRQ indexes >=5 use */
+ /* device specific cap to define content */
};
/*
@@ -1012,6 +1038,68 @@ struct vfio_device_feature {
*/
#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN (0)
+/*
+ * Capability exposed by the DMA fault region
+ * @version: ABI version
+ */
+#define VFIO_REGION_INFO_CAP_DMA_FAULT 6
+
+struct vfio_region_info_cap_fault {
+ struct vfio_info_cap_header header;
+ __u32 version;
+};
+
+/*
+ * Capability exposed by the DMA fault response region
+ * @version: ABI version
+ */
+#define VFIO_REGION_INFO_CAP_DMA_FAULT_RESPONSE 7
+
+struct vfio_region_info_cap_fault_response {
+ struct vfio_info_cap_header header;
+ __u32 version;
+};
+
+/*
+ * DMA Fault Region Layout
+ * @tail: index relative to the start of the ring buffer at which the
+ * consumer finds the next item in the buffer
+ * @entry_size: fault ring buffer entry size in bytes
+ * @nb_entries: max capacity of the fault ring buffer
+ * @offset: ring buffer offset relative to the start of the region
+ * @head: index relative to the start of the ring buffer at which the
+ * producer (kernel) inserts items into the buffer
+ */
+struct vfio_region_dma_fault {
+ /* Write-Only */
+ __u32 tail;
+ /* Read-Only */
+ __u32 entry_size;
+ __u32 nb_entries;
+ __u32 offset;
+ __u32 head;
+};
+
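A sketch of the consumer side, assuming the region (header plus ring) has been mmap'ed at @base; error handling and memory barriers are omitted:

#include <stddef.h>

/* Sketch: drain pending faults. The kernel produces at @head,
 * userspace consumes at @tail and publishes the new tail. */
static void drain_faults(void *base)
{
        struct vfio_region_dma_fault *hdr = base;
        __u32 tail = hdr->tail;

        while (tail != hdr->head) {
                struct iommu_fault *evt =
                        (void *)((char *)base + hdr->offset +
                                 (size_t)tail * hdr->entry_size);

                decode_fault(evt);      /* helper sketched earlier */
                tail = (tail + 1) % hdr->nb_entries;
        }
        hdr->tail = tail;               /* hand entries back */
}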
+/*
+ * DMA Fault Response Region Layout
+ * @head: index relative to the start of the ring buffer at which the
+ * producer (userspace) inserts responses into the buffer
+ * @entry_size: fault ring buffer entry size in bytes
+ * @nb_entries: max capacity of the fault ring buffer
+ * @offset: ring buffer offset relative to the start of the region
+ * @tail: index relative to the start of the ring buffer at which the
+ * consumer (kernel) finds the next item in the buffer
+ */
+struct vfio_region_dma_fault_response {
+ /* Write-Only */
+ __u32 head;
+ /* Read-Only */
+ __u32 entry_size;
+ __u32 nb_entries;
+ __u32 offset;
+ __u32 tail;
+};
+
/* -------- API for Type1 VFIO IOMMU -------- */
/**
@@ -1124,7 +1212,7 @@ struct vfio_iommu_type1_dma_map {
struct vfio_bitmap {
__u64 pgsize; /* page size for bitmap in bytes */
__u64 size; /* in bytes */
- __u64 *data; /* one bit per page */
+ __u64 *data; /* one bit per page */
};
/**
@@ -1250,6 +1338,134 @@ struct vfio_iommu_type1_dirty_bitmap_get {
#define VFIO_IOMMU_DIRTY_PAGES _IO(VFIO_TYPE, VFIO_BASE + 17)
+/*
+ * VFIO_IOMMU_BIND_PROCESS
+ *
+ * Allocate a PASID for a process address space, and use it to attach this
+ * process to all devices in the container. Devices can then tag their DMA
+ * traffic with the returned @pasid to perform transactions on the associated
+ * virtual address space. Mapping and unmapping buffers is performed by standard
+ * functions such as mmap and malloc.
+ *
+ * If flag is VFIO_IOMMU_BIND_PID, @pid contains the pid of a foreign process to
+ * bind. Otherwise the current task is bound. Given that the caller owns the
+ * device, setting this flag grants the caller read and write permissions on the
+ * entire address space of the foreign process described by @pid. Therefore,
+ * permission to perform the bind operation on a foreign process is governed by
+ * the ptrace access mode PTRACE_MODE_ATTACH_REALCREDS check. See man ptrace(2)
+ * for more information.
+ *
+ * On success, VFIO writes a Process Address Space ID (PASID) into @pasid. This
+ * ID is unique to a process and can be used on all devices in the container.
+ *
+ * On fork, the child inherits the device fd and can use the bonds set up by its
+ * parent. Consequently, the child has R/W access on the address spaces bound by
+ * its parent. After an execv, the device fd is closed and the child doesn't
+ * have access to the address space anymore.
+ *
+ * To remove a bond between process and container, VFIO_IOMMU_UNBIND ioctl is
+ * issued with the same parameters. If a pid was specified in VFIO_IOMMU_BIND,
+ * it should also be present for VFIO_IOMMU_UNBIND. Otherwise, the current task
+ * is unbound from the container.
+ */
+struct vfio_iommu_type1_bind_process {
+ __u32 flags;
+#define VFIO_IOMMU_BIND_PID (1 << 0)
+ __u32 pasid;
+ __s32 pid;
+};
+
+/*
+ * The only mode supported at the moment is VFIO_IOMMU_BIND_PROCESS, which takes
+ * vfio_iommu_type1_bind_process in data.
+ */
+struct vfio_iommu_type1_bind {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_IOMMU_BIND_PROCESS (1 << 0)
+ __u8 data[];
+};
+
+/*
+ * VFIO_IOMMU_BIND - _IOWR(VFIO_TYPE, VFIO_BASE + 22, struct vfio_iommu_type1_bind)
+ *
+ * Manage address spaces of devices in this container. Initially a TYPE1
+ * container can only have one address space, managed with
+ * VFIO_IOMMU_MAP/UNMAP_DMA.
+ *
+ * An IOMMU of type VFIO_TYPE1_NESTING_IOMMU can be managed by both MAP/UNMAP
+ * and BIND ioctls at the same time. MAP/UNMAP acts on the stage-2 (host) page
+ * tables, and BIND manages the stage-1 (guest) page tables. Other types of
+ * IOMMU may allow MAP/UNMAP and BIND to coexist, where MAP/UNMAP controls
+ * non-PASID traffic and BIND controls PASID traffic. But this depends on the
+ * underlying IOMMU architecture and isn't guaranteed.
+ *
+ * Availability of this feature depends on the device, its bus, the underlying
+ * IOMMU and the CPU architecture.
+ *
+ * returns: 0 on success, -errno on failure.
+ */
+#define VFIO_IOMMU_BIND _IO(VFIO_TYPE, VFIO_BASE + 22)
+
+/*
+ * VFIO_IOMMU_UNBIND - _IOWR(VFIO_TYPE, VFIO_BASE + 23, struct vfio_iommu_type1_bind)
+ *
+ * Undo what was done by the corresponding VFIO_IOMMU_BIND ioctl.
+ */
+#define VFIO_IOMMU_UNBIND _IO(VFIO_TYPE, VFIO_BASE + 23)
+
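A sketch of binding the current task, assuming container_fd is an open VFIO container backed by a compatible IOMMU; the helper name is an assumption:

#include <stdlib.h>
#include <sys/ioctl.h>

/* Sketch: bind the current task and read back the allocated PASID. */
static int bind_current_task(int container_fd, __u32 *pasid)
{
        size_t argsz = sizeof(struct vfio_iommu_type1_bind) +
                       sizeof(struct vfio_iommu_type1_bind_process);
        struct vfio_iommu_type1_bind *bind = calloc(1, argsz);
        struct vfio_iommu_type1_bind_process *proc;
        int ret;

        if (!bind)
                return -1;
        proc = (void *)bind->data;
        bind->argsz = argsz;
        bind->flags = VFIO_IOMMU_BIND_PROCESS;
        /* proc->flags stays 0: bind the current task, not a foreign pid */
        ret = ioctl(container_fd, VFIO_IOMMU_BIND, bind);
        if (!ret)
                *pasid = proc->pasid;
        free(bind);
        return ret;
}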
+/*
+ * VFIO_IOMMU_SET_PASID_TABLE - _IOWR(VFIO_TYPE, VFIO_BASE + 18,
+ * struct vfio_iommu_type1_set_pasid_table)
+ *
+ * The SET operation passes a PASID table to the host while the
+ * UNSET operation detaches the one currently programmed. It is
+ * allowed to "SET" the table several times without unsetting as
+ * long as the table config does not stay IOMMU_PASID_CONFIG_TRANSLATE.
+ */
+struct vfio_iommu_type1_set_pasid_table {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_PASID_TABLE_FLAG_SET (1 << 0)
+#define VFIO_PASID_TABLE_FLAG_UNSET (1 << 1)
+ struct iommu_pasid_table_config config; /* used on SET */
+};
+
+#define VFIO_IOMMU_SET_PASID_TABLE _IO(VFIO_TYPE, VFIO_BASE + 18)
+
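A sketch of the SET path for an SMMUv3 guest; container_fd, the CD table GPA and the PASID width are assumptions for illustration:

/* Sketch: attach the guest CD table, stage 1 translating. */
static int set_pasid_table(int container_fd, __u64 cd_table_gpa)
{
        struct vfio_iommu_type1_set_pasid_table spt = {
                .argsz = sizeof(spt),
                .flags = VFIO_PASID_TABLE_FLAG_SET,
                .config = {
                        .argsz      = sizeof(spt.config),
                        .version    = PASID_TABLE_CFG_VERSION_1,
                        .base_ptr   = cd_table_gpa,
                        .format     = IOMMU_PASID_FORMAT_SMMUV3,
                        .pasid_bits = 8,        /* assumed width */
                        .config     = IOMMU_PASID_CONFIG_TRANSLATE,
                        .vendor_data.smmuv3.version =
                                PASID_TABLE_SMMUV3_CFG_VERSION_1,
                },
        };

        return ioctl(container_fd, VFIO_IOMMU_SET_PASID_TABLE, &spt);
}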
+/**
+ * VFIO_IOMMU_CACHE_INVALIDATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19,
+ * struct vfio_iommu_type1_cache_invalidate)
+ *
+ * Propagate guest IOMMU cache invalidation to the host.
+ */
+struct vfio_iommu_type1_cache_invalidate {
+ __u32 argsz;
+ __u32 flags;
+ struct iommu_cache_invalidate_info info;
+};
+#define VFIO_IOMMU_CACHE_INVALIDATE _IO(VFIO_TYPE, VFIO_BASE + 19)
+
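Continuing the iommu.h sketch above, the invalidation info is simply wrapped and pushed down; the helper name is an assumption:

/* Sketch: forward one IOTLB invalidation to the host. */
static int invalidate_iotlb(int container_fd, __u64 pasid, __u64 iova)
{
        struct vfio_iommu_type1_cache_invalidate ci = {
                .argsz = sizeof(ci),
        };

        build_inv(&ci.info, pasid, iova);   /* helper sketched earlier */
        return ioctl(container_fd, VFIO_IOMMU_CACHE_INVALIDATE, &ci);
}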
+/**
+ * VFIO_IOMMU_SET_MSI_BINDING - _IOWR(VFIO_TYPE, VFIO_BASE + 20,
+ * struct vfio_iommu_type1_set_msi_binding)
+ *
+ * Pass a stage 1 MSI doorbell mapping to the host so that the host
+ * can build a nested stage 2 mapping, or conversely tear down a
+ * previously bound stage 1 MSI binding.
+ */
+struct vfio_iommu_type1_set_msi_binding {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_IOMMU_BIND_MSI (1 << 0)
+#define VFIO_IOMMU_UNBIND_MSI (1 << 1)
+ __u64 iova; /* MSI guest IOVA */
+ /* Fields below are used on BIND */
+ __u64 gpa; /* MSI guest physical address */
+ __u64 size; /* size of stage1 mapping (bytes) */
+};
+#define VFIO_IOMMU_SET_MSI_BINDING _IO(VFIO_TYPE, VFIO_BASE + 20)
+
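A sketch of binding one doorbell; the IOVA/GPA pair and the 4 KiB size are assumptions for illustration:

/* Sketch: map a stage 1 MSI doorbell so the host can nest it. */
static int bind_msi_doorbell(int container_fd, __u64 iova, __u64 gpa)
{
        struct vfio_iommu_type1_set_msi_binding msi = {
                .argsz = sizeof(msi),
                .flags = VFIO_IOMMU_BIND_MSI,
                .iova  = iova,
                .gpa   = gpa,
                .size  = 4096,
        };

        return ioctl(container_fd, VFIO_IOMMU_SET_MSI_BINDING, &msi);
}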
/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
/*
--
2.27.0