On Fri, Apr 25, 2025 at 10:58:08PM -0700, Nicolin Chen wrote:
> For vIOMMU passing through HW resources to user space (VMs), add an mmap
> infrastructure to map a region of hardware MMIO pages.
>
> Maintain an mt_mmap per ictx for validations. To allow IOMMU drivers to add
> and delete mmappable regions to/from the mt_mmap, add a pair of new helpers:
> iommufd_ctx_alloc_mmap() and iommufd_ctx_free_mmap().
>
> Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
> ---
>  drivers/iommu/iommufd/iommufd_private.h |  8 +++++
>  include/linux/iommufd.h                 | 15 ++++++++++
>  drivers/iommu/iommufd/driver.c          | 39 +++++++++++++++++++++++++
>  drivers/iommu/iommufd/main.c            | 39 +++++++++++++++++++++++++
>  4 files changed, 101 insertions(+)
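
Not a problem with the patch, I just want to make sure I read the intended
driver-side contract right. A rough sketch of how a vIOMMU driver would
consume the two new helpers (the driver, function, and field names below are
made up; only iommufd_ctx_alloc_mmap()/iommufd_ctx_free_mmap() are from this
patch):

	#include <linux/iommufd.h>

	/* Hypothetical vIOMMU object, embedding the iommufd core object */
	struct my_viommu {
		struct iommufd_viommu core;
		unsigned long mmio_immap_id;
		size_t mmio_len;
	};

	static int my_viommu_expose_mmio(struct my_viommu *vi,
					 struct iommufd_ctx *ictx,
					 phys_addr_t mmio_base, size_t mmio_len)
	{
		int rc;

		/* base and len must be page-aligned, per the helper's checks */
		rc = iommufd_ctx_alloc_mmap(ictx, mmio_base, mmio_len,
					    &vi->mmio_immap_id);
		if (rc)
			return rc;
		vi->mmio_len = mmio_len;

		/*
		 * vi->mmio_immap_id (already << PAGE_SHIFT) and vi->mmio_len
		 * then go back to user space in the driver-specific output of
		 * IOMMU_VIOMMU_ALLOC, to be used as the mmap() offset/length.
		 */
		return 0;
	}

	static void my_viommu_unexpose_mmio(struct my_viommu *vi,
					    struct iommufd_ctx *ictx)
	{
		iommufd_ctx_free_mmap(ictx, vi->mmio_immap_id);
	}

If that matches your intent, feel free to ignore.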
> diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
> index b974c207ae8a..db5b62ec4abb 100644
> --- a/drivers/iommu/iommufd/iommufd_private.h
> +++ b/drivers/iommu/iommufd/iommufd_private.h
> @@ -7,6 +7,7 @@
>  #include <linux/iommu.h>
>  #include <linux/iommufd.h>
>  #include <linux/iova_bitmap.h>
> +#include <linux/maple_tree.h>
>  #include <linux/rwsem.h>
>  #include <linux/uaccess.h>
>  #include <linux/xarray.h>
> @@ -44,6 +45,7 @@ struct iommufd_ctx {
>  	struct xarray groups;
>  	wait_queue_head_t destroy_wait;
>  	struct rw_semaphore ioas_creation_lock;
> +	struct maple_tree mt_mmap;
>  
>  	struct mutex sw_msi_lock;
>  	struct list_head sw_msi_list;
> @@ -55,6 +57,12 @@ struct iommufd_ctx {
>  	struct iommufd_ioas *vfio_ioas;
>  };
>  
> +/* Entry for iommufd_ctx::mt_mmap */
> +struct iommufd_mmap {
> +	unsigned long pfn_start;
> +	unsigned long pfn_end;
> +};
> +
>  /*
>   * The IOVA to PFN map. The map automatically copies the PFNs into multiple
>   * domains and permits sharing of PFNs between io_pagetable instances. This
> diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
> index 5dff154e8ce1..d63e2d91be0d 100644
> --- a/include/linux/iommufd.h
> +++ b/include/linux/iommufd.h
> @@ -236,6 +236,9 @@ int iommufd_object_depend(struct iommufd_object *obj_dependent,
>  			  struct iommufd_object *obj_depended);
>  void iommufd_object_undepend(struct iommufd_object *obj_dependent,
>  			     struct iommufd_object *obj_depended);
> +int iommufd_ctx_alloc_mmap(struct iommufd_ctx *ictx, phys_addr_t base,
> +			   size_t size, unsigned long *immap_id);
> +void iommufd_ctx_free_mmap(struct iommufd_ctx *ictx, unsigned long immap_id);
>  struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
>  				       unsigned long vdev_id);
>  int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
> @@ -262,11 +265,23 @@ static inline int iommufd_object_depend(struct iommufd_object *obj_dependent,
>  	return -EOPNOTSUPP;
>  }
>  
> +static inline int iommufd_ctx_alloc_mmap(struct iommufd_ctx *ictx,
> +					  phys_addr_t base, size_t size,
> +					  unsigned long *immap_id)
> +{
> +	return -EOPNOTSUPP;
> +}
> +
>  static inline void iommufd_object_undepend(struct iommufd_object *obj_dependent,
>  					    struct iommufd_object *obj_depended)
>  {
>  }
>  
> +static inline void iommufd_ctx_free_mmap(struct iommufd_ctx *ictx,
> +					 unsigned long immap_id)
> +{
> +}
> +
>  static inline struct device *
>  iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id)
>  {
> diff --git a/drivers/iommu/iommufd/driver.c b/drivers/iommu/iommufd/driver.c
> index fb7f8fe40f95..c55336c580dc 100644
> --- a/drivers/iommu/iommufd/driver.c
> +++ b/drivers/iommu/iommufd/driver.c
> @@ -78,6 +78,45 @@ void iommufd_object_undepend(struct iommufd_object *obj_dependent,
>  }
>  EXPORT_SYMBOL_NS_GPL(iommufd_object_undepend, "IOMMUFD");
>  
> +/* Driver should report the output @immap_id to user space for mmap() syscall */
> +int iommufd_ctx_alloc_mmap(struct iommufd_ctx *ictx, phys_addr_t base,
> +			   size_t size, unsigned long *immap_id)
> +{
> +	struct iommufd_mmap *immap;
> +	int rc;
> +
> +	if (WARN_ON_ONCE(!immap_id))
> +		return -EINVAL;
> +	if (base & ~PAGE_MASK)
> +		return -EINVAL;
> +	if (!size || size & ~PAGE_MASK)
> +		return -EINVAL;
> +
> +	immap = kzalloc(sizeof(*immap), GFP_KERNEL);
> +	if (!immap)
> +		return -ENOMEM;
> +	immap->pfn_start = base >> PAGE_SHIFT;
> +	immap->pfn_end = immap->pfn_start + (size >> PAGE_SHIFT) - 1;
> +
> +	rc = mtree_alloc_range(&ictx->mt_mmap, immap_id, immap, sizeof(immap),
> +			       0, LONG_MAX >> PAGE_SHIFT, GFP_KERNEL);

I believe this should be sizeof(*immap)?

> +	if (rc < 0) {
> +		kfree(immap);
> +		return rc;
> +	}
> +
> +	/* mmap() syscall will right-shift the immap_id to vma->vm_pgoff */
> +	*immap_id <<= PAGE_SHIFT;
> +	return 0;
> +}
> +EXPORT_SYMBOL_NS_GPL(iommufd_ctx_alloc_mmap, "IOMMUFD");
> +
> +void iommufd_ctx_free_mmap(struct iommufd_ctx *ictx, unsigned long immap_id)
> +{
> +	kfree(mtree_erase(&ictx->mt_mmap, immap_id >> PAGE_SHIFT));
> +}
> +EXPORT_SYMBOL_NS_GPL(iommufd_ctx_free_mmap, "IOMMUFD");
> +
>  /* Caller should xa_lock(&viommu->vdevs) to protect the return value */
>  struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
>  				       unsigned long vdev_id)
> diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
> index ac51d5cfaa61..4b46ea47164d 100644
> --- a/drivers/iommu/iommufd/main.c
> +++ b/drivers/iommu/iommufd/main.c
> @@ -213,6 +213,7 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp)
>  	xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT);
>  	xa_init(&ictx->groups);
>  	ictx->file = filp;
> +	mt_init_flags(&ictx->mt_mmap, MT_FLAGS_ALLOC_RANGE);
>  	init_waitqueue_head(&ictx->destroy_wait);
>  	mutex_init(&ictx->sw_msi_lock);
>  	INIT_LIST_HEAD(&ictx->sw_msi_list);
> @@ -410,11 +411,49 @@ static long iommufd_fops_ioctl(struct file *filp, unsigned int cmd,
>  	return ret;
>  }
>  
> +/*
> + * Kernel driver must first do iommufd_ctx_alloc_mmap() to register an mmappable
> + * MMIO region to the iommufd core to receive an "immap_id". Then, driver should
> + * report to user space this immap_id and the size of the registered MMIO region
> + * for @vm_pgoff and @size of an mmap() call, via an IOMMU_VIOMMU_ALLOC ioctl in
> + * the output fields of its driver-type data structure.
> + * Note the @size is allowed to be smaller than the registered size as a partial
> + * mmap starting from the registered base address.
> + */
> +static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
> +{
> +	struct iommufd_ctx *ictx = filp->private_data;
> +	size_t size = vma->vm_end - vma->vm_start;
> +	struct iommufd_mmap *immap;
> +
> +	if (size & ~PAGE_MASK)
> +		return -EINVAL;
> +	if (!(vma->vm_flags & VM_SHARED))
> +		return -EINVAL;
> +	if (vma->vm_flags & VM_EXEC)
> +		return -EPERM;
> +
> +	/* vm_pgoff carries an index (immap_id) to an mtree entry (immap) */
> +	immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff);
> +	if (!immap)
> +		return -ENXIO;
> +	if (size >> PAGE_SHIFT > immap->pfn_end - immap->pfn_start + 1)
> +		return -ENXIO;
> +
> +	vma->vm_pgoff = 0;
> +	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
> +	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
> +
> +	return remap_pfn_range(vma, vma->vm_start, immap->pfn_start, size,
> +			       vma->vm_page_prot);
> +}
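
And on the user-space side, if I follow the comment above plus the
*immap_id <<= PAGE_SHIFT in iommufd_ctx_alloc_mmap(), the VMM would end up
with something like the below. Here mmap_offset/mmap_length stand for
whatever driver-specific IOMMU_VIOMMU_ALLOC output fields carry the immap_id
and the registered size; those names are hypothetical:

	#include <sys/mman.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <err.h>

	/*
	 * iommufd_fd:  the opened /dev/iommu fd
	 * mmap_offset: the immap_id reported by the driver (already a byte
	 *              offset, i.e. << PAGE_SHIFT)
	 * mmap_length: the registered MMIO region size reported by the driver
	 */
	static void *map_viommu_mmio(int iommufd_fd, uint64_t mmap_offset,
				     size_t mmap_length)
	{
		void *mmio = mmap(NULL, mmap_length, PROT_READ | PROT_WRITE,
				  MAP_SHARED, iommufd_fd, (off_t)mmap_offset);

		if (mmio == MAP_FAILED)
			err(1, "mmap(iommufd)");
		return mmio;
	}

A shorter length, down to one page, should also be accepted, since the size
check above only rejects requests larger than the registered region.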
> +
>  static const struct file_operations iommufd_fops = {
>  	.owner = THIS_MODULE,
>  	.open = iommufd_fops_open,
>  	.release = iommufd_fops_release,
>  	.unlocked_ioctl = iommufd_fops_ioctl,
> +	.mmap = iommufd_fops_mmap,
>  };
>  
>  /**
Thanks,
Praan
> --
> 2.43.0