From: Thierry Reding <treding@nvidia.com>
There is no technical reason why there should be a limited number of CMA regions, so extract some code into helpers and use them to implement new functions, cma_create() and cma_free(), that allow CMA regions to be created and freed dynamically at runtime.
Note that these dynamically created CMA areas are treated specially and do not contribute to the total CMA page count, so that this count continues to apply only to the fixed set of CMA areas.
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 include/linux/cma.h | 16 ++++++++
 mm/cma.c            | 89 ++++++++++++++++++++++++++++++++++-----------
 2 files changed, 83 insertions(+), 22 deletions(-)
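For illustration only (not part of this patch): a hypothetical early-boot caller that has already reserved a suitably aligned region via memblock might use the new interface roughly as follows; the function and region names below are made up.

  #include <linux/cma.h>
  #include <linux/err.h>

  static int __init my_region_init(phys_addr_t base, phys_addr_t size)
  {
          struct page *page;
          struct cma *cma;

          /*
           * base/size are assumed to be memblock-reserved and aligned to
           * CMA_MIN_ALIGNMENT_BYTES; cma_create() rejects anything else.
           * The !CONFIG_CMA stub returns NULL, hence IS_ERR_OR_NULL().
           */
          cma = cma_create(base, size, 0, "my-region");
          if (IS_ERR_OR_NULL(cma))
                  return cma ? PTR_ERR(cma) : -ENODEV;

          /* the area can then be used like any other CMA area */
          page = cma_alloc(cma, 1, 0, false);
          if (page)
                  cma_release(cma, page, 1);

          cma_free(cma);
          return 0;
  }

The sketch assumes the region was set aside via memblock before this point, since cma_create() is __init and checks memblock_is_region_reserved(); note also that cma_free() only releases the struct cma itself, not the underlying memory.
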
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 62d9c1cf6326..f1e20642198a 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -61,6 +61,10 @@ extern void cma_reserve_pages_on_error(struct cma *cma);
 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
 bool cma_free_folio(struct cma *cma, const struct folio *folio);
 bool cma_validate_zones(struct cma *cma);
+
+struct cma *cma_create(phys_addr_t base, phys_addr_t size,
+		       unsigned int order_per_bit, const char *name);
+void cma_free(struct cma *cma);
 #else
 static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
 {
@@ -71,10 +75,22 @@ static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
 {
 	return false;
 }
+
 static inline bool cma_validate_zones(struct cma *cma)
 {
 	return false;
 }
+
+static inline struct cma *cma_create(phys_addr_t base, phys_addr_t size,
+				     unsigned int order_per_bit,
+				     const char *name)
+{
+	return NULL;
+}
+
+static inline void cma_free(struct cma *cma)
+{
+}
 #endif
 
 #endif
diff --git a/mm/cma.c b/mm/cma.c
index e56ec64d0567..8149227d319f 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -214,6 +214,18 @@ void __init cma_reserve_pages_on_error(struct cma *cma)
 	set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags);
 }
 
+static void __init cma_init_area(struct cma *cma, const char *name,
+				 phys_addr_t size, unsigned int order_per_bit)
+{
+	if (name)
+		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
+	else
+		snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);
+
+	cma->available_count = cma->count = size >> PAGE_SHIFT;
+	cma->order_per_bit = order_per_bit;
+}
+
 static int __init cma_new_area(const char *name, phys_addr_t size,
 			       unsigned int order_per_bit,
 			       struct cma **res_cma)
@@ -232,13 +244,8 @@
 	cma = &cma_areas[cma_area_count];
 	cma_area_count++;
 
-	if (name)
-		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
-	else
-		snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);
+	cma_init_area(cma, name, size, order_per_bit);
 
-	cma->available_count = cma->count = size >> PAGE_SHIFT;
-	cma->order_per_bit = order_per_bit;
 	*res_cma = cma;
 	totalcma_pages += cma->count;
 
@@ -251,6 +258,27 @@ static void __init cma_drop_area(struct cma *cma)
 	cma_area_count--;
 }
 
+static int __init cma_check_memory(phys_addr_t base, phys_addr_t size)
+{
+	if (!size || !memblock_is_region_reserved(base, size))
+		return -EINVAL;
+
+	/*
+	 * CMA uses CMA_MIN_ALIGNMENT_BYTES as alignment requirement which
+	 * needs pageblock_order to be initialized. Let's enforce it.
+	 */
+	if (!pageblock_order) {
+		pr_err("pageblock_order not yet initialized. Called during early boot?\n");
+		return -EINVAL;
+	}
+
+	/* ensure minimal alignment required by mm core */
+	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
+		return -EINVAL;
+
+	return 0;
+}
+
 /**
  * cma_init_reserved_mem() - create custom contiguous area from reserved memory
  * @base: Base address of the reserved area
@@ -271,22 +299,9 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 	struct cma *cma;
 	int ret;
 
-	/* Sanity checks */
-	if (!size || !memblock_is_region_reserved(base, size))
-		return -EINVAL;
-
-	/*
-	 * CMA uses CMA_MIN_ALIGNMENT_BYTES as alignment requirement which
-	 * needs pageblock_order to be initialized. Let's enforce it.
-	 */
-	if (!pageblock_order) {
-		pr_err("pageblock_order not yet initialized. Called during early boot?\n");
-		return -EINVAL;
-	}
-
-	/* ensure minimal alignment required by mm core */
-	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
-		return -EINVAL;
+	ret = cma_check_memory(base, size);
+	if (ret < 0)
+		return ret;
 
 	ret = cma_new_area(name, size, order_per_bit, &cma);
 	if (ret != 0)
@@ -1112,3 +1127,33 @@ void __init *cma_reserve_early(struct cma *cma, unsigned long size)
 
 	return ret;
 }
+
+struct cma *__init cma_create(phys_addr_t base, phys_addr_t size,
+			      unsigned int order_per_bit, const char *name)
+{
+	struct cma *cma;
+	int ret;
+
+	ret = cma_check_memory(base, size);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	cma = kzalloc(sizeof(*cma), GFP_KERNEL);
+	if (!cma)
+		return ERR_PTR(-ENOMEM);
+
+	cma_init_area(cma, name, size, order_per_bit);
+	cma->ranges[0].base_pfn = PFN_DOWN(base);
+	cma->ranges[0].early_pfn = PFN_DOWN(base);
+	cma->ranges[0].count = cma->count;
+	cma->nranges = 1;
+
+	cma_activate_area(cma);
+
+	return cma;
+}
+
+void cma_free(struct cma *cma)
+{
+	kfree(cma);
+}