On 18.08.20 10:04, Hyesoo Yu wrote:
> This patch adds support for a chunk heap that allows for buffers that are
> made up of a list of fixed size chunks taken from a CMA. Chunk sizes are
> configured when the heaps are created.
>
> Signed-off-by: Hyesoo Yu <hyesoo.yu@samsung.com>
> ---
>  drivers/dma-buf/heaps/Kconfig      |   9 ++
>  drivers/dma-buf/heaps/Makefile     |   1 +
>  drivers/dma-buf/heaps/chunk_heap.c | 222 +++++++++++++++++++++++++++++++++++++
>  3 files changed, 232 insertions(+)
>  create mode 100644 drivers/dma-buf/heaps/chunk_heap.c
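To make sure I read the design right: each buffer ends up as an array of
order-N compound pages, so mapping it for a device is essentially one
scatterlist entry per chunk. Roughly like the sketch below; this is my own
illustration, not code from this patch (the patch itself goes through the
heap-helpers framework), and chunk_buffer_to_sgt() is a made-up name:

static struct sg_table *chunk_buffer_to_sgt(struct page **pages,
					    unsigned int pagecount,
					    unsigned int chunk_order)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int i;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);
	if (sg_alloc_table(sgt, pagecount, GFP_KERNEL)) {
		kfree(sgt);
		return ERR_PTR(-ENOMEM);
	}
	/* one scatterlist entry per fixed-size chunk */
	for_each_sg(sgt->sgl, sg, pagecount, i)
		sg_set_page(sg, pages[i], PAGE_SIZE << chunk_order, 0);
	return sgt;
}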
>
> diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
> index a5eef06..98552fa 100644
> --- a/drivers/dma-buf/heaps/Kconfig
> +++ b/drivers/dma-buf/heaps/Kconfig
> @@ -12,3 +12,12 @@ config DMABUF_HEAPS_CMA
>  	  Choose this option to enable dma-buf CMA heap. This heap is backed
>  	  by the Contiguous Memory Allocator (CMA). If your system has these
>  	  regions, you should say Y here.
> +
> +config DMABUF_HEAPS_CHUNK
> +	tristate "DMA-BUF CHUNK Heap"
> +	depends on DMABUF_HEAPS && DMA_CMA
> +	help
> +	  Choose this option to enable dma-buf CHUNK heap. This heap is backed
> +	  by the Contiguous Memory Allocator (CMA) and allocates buffers that
> +	  are made up of a list of fixed size chunks taken from CMA. Chunk
> +	  sizes are configured when the heaps are created.
> diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
> index 6e54cde..3b2a0986 100644
> --- a/drivers/dma-buf/heaps/Makefile
> +++ b/drivers/dma-buf/heaps/Makefile
> @@ -2,3 +2,4 @@
>  obj-y += heap-helpers.o
>  obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
>  obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
> +obj-$(CONFIG_DMABUF_HEAPS_CHUNK) += chunk_heap.o
> diff --git a/drivers/dma-buf/heaps/chunk_heap.c b/drivers/dma-buf/heaps/chunk_heap.c
> new file mode 100644
> index 0000000..1eefaec
> --- /dev/null
> +++ b/drivers/dma-buf/heaps/chunk_heap.c
> @@ -0,0 +1,222 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * ION Memory Allocator chunk heap exporter
> + *
> + * Copyright (c) 2020 Samsung Electronics Co., Ltd.
> + * Author: <hyesoo.yu@samsung.com> for Samsung Electronics.
> + */
> +
> +#include <linux/platform_device.h>
> +#include <linux/cma.h>
> +#include <linux/device.h>
> +#include <linux/dma-buf.h>
> +#include <linux/dma-heap.h>
> +#include <linux/dma-contiguous.h>
> +#include <linux/err.h>
> +#include <linux/errno.h>
> +#include <linux/highmem.h>
> +#include <linux/module.h>
> +#include <linux/slab.h>
> +#include <linux/scatterlist.h>
> +#include <linux/sched/signal.h>
> +#include <linux/of_reserved_mem.h>
> +#include <linux/of.h>
> +
> +#include "heap-helpers.h"
> +
> +struct chunk_heap {
> +	struct dma_heap *heap;
> +	phys_addr_t base;
> +	phys_addr_t size;
> +	atomic_t cur_pageblock_idx;
> +	unsigned int max_num_pageblocks;
> +	unsigned int order;
> +};
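For orientation, my reading of these fields: base/size describe the backing
CMA area, order is the fixed chunk size, and max_num_pageblocks is presumably
derived from the region size at probe time. The derivation below is my
assumption, not quoted from the patch:

	/* hypothetical probe-time setup, names assumed */
	chunk_heap->base = cma_get_base(cma);
	chunk_heap->size = cma_get_size(cma);
	chunk_heap->max_num_pageblocks =
		PHYS_PFN(chunk_heap->size) / pageblock_nr_pages;
	atomic_set(&chunk_heap->cur_pageblock_idx, 0);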
> +
> +static void chunk_heap_free(struct heap_helper_buffer *buffer)
> +{
> +	struct chunk_heap *chunk_heap = dma_heap_get_drvdata(buffer->heap);
> +	pgoff_t pg;
> +
> +	for (pg = 0; pg < buffer->pagecount; pg++)
> +		__free_pages(buffer->pages[pg], chunk_heap->order);
> +	kvfree(buffer->pages);
> +	kfree(buffer);
> +}
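Note that the free path hands chunks back with __free_pages() instead of
returning them to CMA. A free path that stayed within the CMA API would
presumably look more like the sketch below, where 'chunk_heap->cma' is an
assumed field that the posted patch does not have:

	/* sketch only, not in the patch */
	for (pg = 0; pg < buffer->pagecount; pg++)
		cma_release(chunk_heap->cma, buffer->pages[pg],
			    1 << chunk_heap->order);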
> +
> +static inline unsigned long chunk_get_next_pfn(struct chunk_heap *chunk_heap)
> +{
> +	unsigned long i = atomic_inc_return(&chunk_heap->cur_pageblock_idx) %
> +		chunk_heap->max_num_pageblocks;
> +
> +	return PHYS_PFN(chunk_heap->base) + i * pageblock_nr_pages;
> +}
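So the heap walks the region's pageblocks round-robin, continuing where the
previous allocation stopped. With made-up numbers, pageblock_nr_pages = 512
and max_num_pageblocks = 4, and assuming cur_pageblock_idx starts at 0
(atomic_inc_return() yields the incremented value, so the first call lands on
block 1):

	chunk_get_next_pfn(ch);	/* base_pfn + 1 * 512 */
	chunk_get_next_pfn(ch);	/* base_pfn + 2 * 512 */
	chunk_get_next_pfn(ch);	/* base_pfn + 3 * 512 */
	chunk_get_next_pfn(ch);	/* base_pfn + 0 * 512, wrapped around */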
> +
> +static int chunk_alloc_pages(struct chunk_heap *chunk_heap, struct page **pages,
> +			     unsigned int order, unsigned int count)
> +{
> +	unsigned long base;
> +	unsigned int i = 0, nr_block = 0, nr_elem, ret;
> +
> +	while (count) {
> +		/*
> +		 * If the number of scanned page blocks reaches the maximum,
> +		 * the allocation attempt fails.
> +		 */
> +		if (nr_block++ == chunk_heap->max_num_pageblocks) {
> +			ret = -ENOMEM;
> +			goto err_bulk;
> +		}
> +
> +		base = chunk_get_next_pfn(chunk_heap);
> +		nr_elem = min_t(unsigned int, count, pageblock_nr_pages >> order);
> +		ret = alloc_pages_bulk(base, base + pageblock_nr_pages, MIGRATE_CMA,
> +				       GFP_KERNEL, order, nr_elem, pages + i);
So you are completely bypassing the CMA allocator here. This all smells like
a complete hack to me. No, I don't think this is the right way to support
(or rather, to speed up allocations for) special, weird hardware.
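For comparison, staying within the CMA API would mean allocating each chunk
with cma_alloc() and letting CMA keep its bookkeeping and migration logic in
one place. Roughly like this sketch of mine (not a drop-in replacement for
the function above):

static int chunk_alloc_pages_cma(struct cma *cma, struct page **pages,
				 unsigned int order, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		/* one CMA allocation per fixed-size chunk */
		pages[i] = cma_alloc(cma, 1 << order, order, false);
		if (!pages[i])
			goto err;
	}
	return 0;
err:
	while (i--)
		cma_release(cma, pages[i], 1 << order);
	return -ENOMEM;
}

That is obviously slower per chunk, which I assume is what this series is
trying to avoid, but it doesn't reach around the allocator's back.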