From: Jiri Pirko <jiri@nvidia.com>
Current CC designs don't place a vIOMMU in front of untrusted devices. Instead, the DMA API forces all DMA for untrusted devices through swiotlb bounce buffers (is_swiotlb_force_bounce()), which copy data into decrypted memory on behalf of the device.
When a caller has already arranged for the memory to be decrypted via set_memory_decrypted(), the DMA API needs to know, so it can map directly using the unencrypted physical address rather than bounce buffering. Following the pattern of DMA_ATTR_MMIO, add DMA_ATTR_CC_DECRYPTED for this purpose. As in the MMIO case, only the caller knows what kind of memory it has, so it must inform the DMA API for the mapping to work correctly.
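A minimal caller-side sketch of the intended usage (illustrative only; error unwinding is abbreviated and "dev" stands for whatever device the buffer is mapped for):

	struct page *page = alloc_pages(GFP_KERNEL, 0);
	dma_addr_t dma;

	/* Share the page with the host; it is now decrypted. */
	if (set_memory_decrypted((unsigned long)page_address(page), 1))
		goto err_free;

	/*
	 * The memory is already decrypted, so ask the DMA API to map it
	 * directly instead of bounce buffering it.
	 */
	dma = dma_map_phys(dev, page_to_phys(page), PAGE_SIZE,
			   DMA_BIDIRECTIONAL, DMA_ATTR_CC_DECRYPTED);
	if (dma_mapping_error(dev, dma))
		goto err_encrypt;

	/* ... device DMA ... */

	dma_unmap_phys(dev, dma, PAGE_SIZE, DMA_BIDIRECTIONAL,
		       DMA_ATTR_CC_DECRYPTED);
	set_memory_encrypted((unsigned long)page_address(page), 1);
	__free_pages(page, 0);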
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
v3->v4:
- added some sanity checks to dma_map_phys and dma_unmap_phys
- enhanced documentation of DMA_ATTR_CC_DECRYPTED attr
v1->v2:
- rebased on top of recent dma-mapping-fixes
---
 include/linux/dma-mapping.h | 10 ++++++++++
 include/trace/events/dma.h  |  3 ++-
 kernel/dma/direct.h         | 14 +++++++++++---
 kernel/dma/mapping.c        | 13 +++++++++++--
 4 files changed, 34 insertions(+), 6 deletions(-)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 29973baa0581..476964d2b22f 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -85,6 +85,16 @@
  * a cacheline must have this attribute for this to be considered safe.
  */
 #define DMA_ATTR_CPU_CACHE_CLEAN	(1UL << 11)
+/*
+ * DMA_ATTR_CC_DECRYPTED: Indicates the DMA mapping is decrypted (shared) for
+ * confidential computing guests. For normal system memory the caller must have
+ * called set_memory_decrypted(), and pgprot_decrypted must be used when
+ * creating CPU PTEs for the mapping. The same decrypted semantic may be passed
+ * to the vIOMMU when it sets up the IOPTE. For MMIO, use together with
+ * DMA_ATTR_MMIO to indicate decrypted MMIO. Unless DMA_ATTR_MMIO is provided,
+ * a struct page is required.
+ */
+#define DMA_ATTR_CC_DECRYPTED	(1UL << 12)
 
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
index 33e99e792f1a..b8082d5177c4 100644
--- a/include/trace/events/dma.h
+++ b/include/trace/events/dma.h
@@ -32,7 +32,8 @@ TRACE_DEFINE_ENUM(DMA_NONE);
 	{ DMA_ATTR_ALLOC_SINGLE_PAGES, "ALLOC_SINGLE_PAGES" }, \
 	{ DMA_ATTR_NO_WARN, "NO_WARN" }, \
 	{ DMA_ATTR_PRIVILEGED, "PRIVILEGED" }, \
-	{ DMA_ATTR_MMIO, "MMIO" })
+	{ DMA_ATTR_MMIO, "MMIO" }, \
+	{ DMA_ATTR_CC_DECRYPTED, "CC_DECRYPTED" })
 
 DECLARE_EVENT_CLASS(dma_map,
 	TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index e89f175e9c2d..c047a9d0fda3 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -84,16 +84,24 @@ static inline dma_addr_t dma_direct_map_phys(struct device *dev,
 	dma_addr_t dma_addr;
 
 	if (is_swiotlb_force_bounce(dev)) {
-		if (attrs & DMA_ATTR_MMIO)
-			return DMA_MAPPING_ERROR;
+		if (!(attrs & DMA_ATTR_CC_DECRYPTED)) {
+			if (attrs & DMA_ATTR_MMIO)
+				return DMA_MAPPING_ERROR;
 
-		return swiotlb_map(dev, phys, size, dir, attrs);
+			return swiotlb_map(dev, phys, size, dir, attrs);
+		}
+	} else if (attrs & DMA_ATTR_CC_DECRYPTED) {
+		return DMA_MAPPING_ERROR;
 	}
 
 	if (attrs & DMA_ATTR_MMIO) {
 		dma_addr = phys;
 		if (unlikely(!dma_capable(dev, dma_addr, size, false)))
 			goto err_overflow;
+	} else if (attrs & DMA_ATTR_CC_DECRYPTED) {
+		dma_addr = phys_to_dma_unencrypted(dev, phys);
+		if (unlikely(!dma_capable(dev, dma_addr, size, false)))
+			goto err_overflow;
 	} else {
 		dma_addr = phys_to_dma(dev, phys);
 		if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 3928a509c44c..abb0c88b188b 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -157,6 +157,7 @@ dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	bool is_mmio = attrs & DMA_ATTR_MMIO;
+	bool is_cc_decrypted = attrs & DMA_ATTR_CC_DECRYPTED;
 	dma_addr_t addr = DMA_MAPPING_ERROR;
 
 	BUG_ON(!valid_dma_direction(dir));
@@ -165,8 +166,11 @@ dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
 		return DMA_MAPPING_ERROR;
 
 	if (dma_map_direct(dev, ops) ||
-	    (!is_mmio && arch_dma_map_phys_direct(dev, phys + size)))
+	    (!is_mmio && !is_cc_decrypted &&
+	     arch_dma_map_phys_direct(dev, phys + size)))
 		addr = dma_direct_map_phys(dev, phys, size, dir, attrs);
+	else if (is_cc_decrypted)
+		return DMA_MAPPING_ERROR;
 	else if (use_dma_iommu(dev))
 		addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
 	else if (ops->map_phys)
@@ -203,11 +207,16 @@ void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	bool is_mmio = attrs & DMA_ATTR_MMIO;
+	bool is_cc_decrypted = attrs & DMA_ATTR_CC_DECRYPTED;
 
 	BUG_ON(!valid_dma_direction(dir));
+
 	if (dma_map_direct(dev, ops) ||
-	    (!is_mmio && arch_dma_unmap_phys_direct(dev, addr + size)))
+	    (!is_mmio && !is_cc_decrypted &&
+	     arch_dma_unmap_phys_direct(dev, addr + size)))
 		dma_direct_unmap_phys(dev, addr, size, dir, attrs);
+	else if (is_cc_decrypted)
+		return;
 	else if (use_dma_iommu(dev))
 		iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
 	else if (ops->unmap_phys)
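For reviewers: with this applied, the decision in dma_direct_map_phys() condenses to roughly the following (a paraphrase of the hunks above, not verbatim kernel code):

	if (is_swiotlb_force_bounce(dev)) {
		/* Untrusted device in a CC guest. */
		if (!(attrs & DMA_ATTR_CC_DECRYPTED))
			return swiotlb_map(...);	/* or fail for MMIO */
		/* Caller already decrypted the memory: map it directly. */
	} else if (attrs & DMA_ATTR_CC_DECRYPTED) {
		/* The attr is only valid when bouncing would be forced. */
		return DMA_MAPPING_ERROR;
	}

	if (attrs & DMA_ATTR_MMIO)
		dma_addr = phys;
	else if (attrs & DMA_ATTR_CC_DECRYPTED)
		dma_addr = phys_to_dma_unencrypted(dev, phys);	/* no enc bit */
	else
		dma_addr = phys_to_dma(dev, phys);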