Hi,
On 8/26/25 10:18 AM, Jason Gunthorpe wrote:
Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
 drivers/iommu/generic_pt/iommu_pt.h | 155 ++++++++++++++++++++++++++++
 include/linux/generic_pt/iommu.h    |  10 +-
 2 files changed, 163 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/generic_pt/iommu_pt.h b/drivers/iommu/generic_pt/iommu_pt.h
index 9413f868a65bfa..53901a4a977935 100644
--- a/drivers/iommu/generic_pt/iommu_pt.h
+++ b/drivers/iommu/generic_pt/iommu_pt.h
@@ -14,6 +14,29 @@
 #include <linux/iommu.h>
 #include "../iommu-pages.h"
 #include <linux/export.h>
+#include <linux/cleanup.h>
+#include <linux/dma-mapping.h>
+static void gather_range_pages(struct iommu_iotlb_gather *iotlb_gather,
+			       struct pt_iommu *iommu_table, pt_vaddr_t iova,
+			       pt_vaddr_t len,
+			       struct iommu_pages_list *free_list)
+{
+	struct pt_common *common = common_from_iommu(iommu_table);
+
+	if (pt_feature(common, PT_FEAT_FLUSH_RANGE_NO_GAPS) &&
+	    iommu_iotlb_gather_is_disjoint(iotlb_gather, iova, len)) {
+		iommu_iotlb_sync(&iommu_table->domain, iotlb_gather);
+		/*
+		 * Note that the sync frees the gather's free list, so we must
+		 * not have any pages on that list that are covered by iova/len
+		 */
+	} else if (pt_feature(common, PT_FEAT_FLUSH_RANGE)) {
+		iommu_iotlb_gather_add_range(iotlb_gather, iova, len);
+	}
+	iommu_pages_list_splice(free_list, &iotlb_gather->freelist);
+}
+
 #define DOMAIN_NS(op) CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), op)
@@ -167,6 +190,138 @@ static inline struct pt_table_p *table_alloc_top(struct pt_common *common,
 		log2_to_int(pt_top_memsize_lg2(common, top_of_table)));
 }
[snip]
+/**
+ * unmap_pages() - Make a range of IOVA empty/not present
+ * @iommu_table: Table to manipulate
+ * @iova: IO virtual address to start
+ * @pgsize: Length of each page
+ * @pgcount: Length of the range in pgsize units starting from @iova
+ * @gather: Gather struct that must be flushed on return

Eh, 2 of these @params don't match the function's arguments (names):
@iommu_table vs. domain, and @gather vs. iotlb_gather.
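
Presumably they just need renaming to the actual parameter names from the
signature below, e.g. (descriptions guessed):

 * @domain: Domain to manipulate
 * @iotlb_gather: Gather struct that must be flushed on return
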
+ *
+ * unmap_pages() will remove a translation created by map_pages(). It cannot
+ * subdivide a mapping created by map_pages(), so it should be called with IOVA
+ * ranges that match those passed to map_pages(). The IOVA range can aggregate
+ * contiguous map_pages() calls so long as no individual range is split.
+ *
+ * Context: The caller must hold a write range lock that includes
+ * the whole range.
+ *
+ * Returns: Number of bytes of VA unmapped. iova + res will be the point
+ * unmapping stopped.
+ */
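
As an aside, the no-subdivide rule is easy to state with a hypothetical
caller at the iommu_map()/iommu_unmap() level (addresses and sizes made
up for illustration):

	/* two separately created, contiguous mappings */
	iommu_map(domain, iova, paddr_a, SZ_2M, prot, GFP_KERNEL);
	iommu_map(domain, iova + SZ_2M, paddr_b, SZ_2M, prot, GFP_KERNEL);

	/* OK: aggregates both mappings, splits neither */
	iommu_unmap(domain, iova, 2 * SZ_2M);

	/* Not OK: would subdivide the first 2M mapping */
	iommu_unmap(domain, iova, SZ_4K);
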
+size_t DOMAIN_NS(unmap_pages)(struct iommu_domain *domain, unsigned long iova,
+			      size_t pgsize, size_t pgcount,
+			      struct iommu_iotlb_gather *iotlb_gather)
+{
+	struct pt_iommu *iommu_table =
+		container_of(domain, struct pt_iommu, domain);
+	struct pt_unmap_args unmap = { .free_list = IOMMU_PAGES_LIST_INIT(
+					       unmap.free_list) };
+	pt_vaddr_t len = pgsize * pgcount;
+	struct pt_range range;
+	int ret;
+
+	ret = make_range(common_from_iommu(iommu_table), &range, iova, len);
+	if (ret)
+		return 0;
+
+	pt_walk_range(&range, __unmap_range, &unmap);
+	gather_range_pages(iotlb_gather, iommu_table, iova, len,
+			   &unmap.free_list);
+	return unmap.unmapped;
+}
+EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(unmap_pages), "GENERIC_PT_IOMMU");
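
For readers following along, the gather plumbing above matches the core's
usual pattern; roughly (a sketch of what __iommu_unmap() in iommu.c does):

	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	iommu_iotlb_gather_init(&iotlb_gather);
	unmapped = domain->ops->unmap_pages(domain, iova, pgsize, pgcount,
					    &iotlb_gather);
	/* flushes the gathered range and frees pages on gather->freelist */
	iommu_iotlb_sync(domain, &iotlb_gather);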