Hi,
On Wed, Feb 18, 2026 at 12:14:12PM -0500, Eric Chanudet wrote:
> The cma dma-buf heaps let userspace allocate buffers in CMA regions
> without enforcing limits. Since each cma region registers in dmem,
> charge against it when allocating a buffer in a cma heap.
>
> Signed-off-by: Eric Chanudet <echanude@redhat.com>
>  drivers/dma-buf/heaps/cma_heap.c | 15 ++++++++++++++-
>  1 file changed, 14 insertions(+), 1 deletion(-)
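For context, this is roughly what the allocation path being charged looks like from userspace (a sketch only; the heap node name follows DEFAULT_CMA_NAME below and will differ for other CMA regions, and the dmem limits themselves are set through the cgroup fs):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/dma-heap.h>

    int main(void)
    {
    	/* heap node named after DEFAULT_CMA_NAME; adjust for your region */
    	int heap = open("/dev/dma_heap/default_cma_region", O_RDWR | O_CLOEXEC);
    	struct dma_heap_allocation_data data = {
    		.len = 4 << 20,			/* 4 MiB */
    		.fd_flags = O_RDWR | O_CLOEXEC,
    	};

    	if (heap < 0 || ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
    		perror("dma-heap alloc");
    		return 1;
    	}
    	/* data.fd is the dma-buf; with this patch the 4 MiB get charged
    	 * against the caller's dmem cgroup for that CMA region. */
    	printf("allocated dma-buf fd %u\n", data.fd);
    	return 0;
    }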
> diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
> index 49cc45fb42dd7200c3c14384bcfdbe85323454b1..bbd4f9495808da19256d97bd6a4dca3e1b0a30a0 100644
> --- a/drivers/dma-buf/heaps/cma_heap.c
> +++ b/drivers/dma-buf/heaps/cma_heap.c
> @@ -27,6 +27,7 @@
>  #include <linux/scatterlist.h>
>  #include <linux/slab.h>
>  #include <linux/vmalloc.h>
> +#include <linux/cgroup_dmem.h>
>  
>  #define DEFAULT_CMA_NAME "default_cma_region"
>  
> @@ -58,6 +59,7 @@ struct cma_heap_buffer {
>  	pgoff_t pagecount;
>  	int vmap_cnt;
>  	void *vaddr;
> +	struct dmem_cgroup_pool_state *pool;
I guess we should add an #if IS_ENABLED()/#endif guard for dmem here?
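Something like the sketch below, maybe (untested; I'm assuming CONFIG_CGROUP_DMEM is the right Kconfig symbol, and that the charge/uncharge calls can rely on the stubs in linux/cgroup_dmem.h when it is disabled, so only the member needs guarding):

    struct cma_heap_buffer {
    	...
    	void *vaddr;
    #if IS_ENABLED(CONFIG_CGROUP_DMEM)
    	/* only carried when the dmem cgroup controller is built in */
    	struct dmem_cgroup_pool_state *pool;
    #endif
    };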
>  };
>  
>  struct dma_heap_attachment {
> @@ -276,6 +278,7 @@ static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
>  	kfree(buffer->pages);
>  	/* release memory */
>  	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
> +	dmem_cgroup_uncharge(buffer->pool, buffer->len);
>  	kfree(buffer);
>  }
>  
> @@ -319,9 +322,17 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
>  	if (align > CONFIG_CMA_ALIGNMENT)
>  		align = CONFIG_CMA_ALIGNMENT;
> +	if (mem_accounting) {
> +		ret = dmem_cgroup_try_charge(cma_get_dmem_cgroup_region(cma_heap->cma), size, &buffer->pool, NULL);
This alone doesn't call for a new version, but adhering to the kernel coding style would look like this:
+		ret = dmem_cgroup_try_charge(cma_get_dmem_cgroup_region(cma_heap->cma),
+					     size, &buffer->pool, NULL);
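That keeps the continuation line aligned with the opening parenthesis, which is also the alignment scripts/checkpatch.pl --strict checks for.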
It looks good to me otherwise,

Acked-by: Maxime Ripard <mripard@kernel.org>
Maxime