Add new block memory pool APIs: wd_blkpool_new, wd_blkpool_delete,
wd_blkpool_phy, wd_blkpool_alloc, wd_blkpool_free, wd_blkpool_setup,
wd_blkpool_destroy_mem, wd_blkpool_create_sglpool and
wd_blkpool_destroy_sglpool.
Once the pool has been set up, an application only needs two of these
APIs: wd_blkpool_alloc and wd_blkpool_free.
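A minimal usage sketch of the default path (illustrative only, not part
of this patch; it assumes h_ctx is a valid non-SVA ctx handle obtained
from wd_request_ctx):

    #include "wd.h"
    #include "wd_bmm.h"

    void example(handle_t h_ctx)
    {
        handle_t sgl_pool;
        void *pool, *buf, *dma;

        pool = wd_blkpool_new(h_ctx);
        if (!pool)
            return;

        /*
         * No explicit wd_blkpool_setup() call is required: the first
         * wd_blkpool_alloc() reserves memory and initializes the pool
         * with default parameters sized to the request.
         */
        buf = wd_blkpool_alloc(pool, 0x1000);
        if (!buf)
            goto out;

        dma = wd_blkpool_phy(pool, buf);
        /* ... hand buf/dma to the device ... */

        /* optional: a hardware sgl pool carved from the same blocks */
        sgl_pool = wd_blkpool_create_sglpool(pool, 256, 255);
        if (sgl_pool)
            wd_blkpool_destroy_sglpool(pool, sgl_pool);

        wd_blkpool_free(pool, buf);
    out:
        wd_blkpool_delete(pool);  /* also releases the reserved memory */
    }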
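When the application owns the DMA-able memory, it can plug its own
callbacks in through struct wd_mm_br and call wd_blkpool_setup()
explicitly. A sketch, with my_alloc/my_free/my_iova_map standing in
for hypothetical application-provided helpers:

    #include <string.h>
    #include "wd.h"
    #include "wd_bmm.h"

    /* hypothetical helpers supplied by the application */
    extern void *my_alloc(void *usr, size_t size);
    extern void my_free(void *usr, void *va);
    extern void *my_iova_map(void *usr, void *va, size_t sz);

    int setup_with_user_mem(void *pool, void *usr_hdl)
    {
        struct wd_blkpool_setup setup;

        memset(&setup, 0, sizeof(setup));
        setup.block_size = 0x1000;          /* 4KB blocks */
        setup.block_num = 1024;
        setup.align_size = DEFAULT_ALIGN_SIZE;
        setup.br.alloc = my_alloc;          /* reserves the whole pool */
        setup.br.free = my_free;
        setup.br.iova_map = my_iova_map;    /* VA -> iova per block */
        setup.br.usr = usr_hdl;

        return wd_blkpool_setup(pool, &setup);
    }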
Signed-off-by: Zhangfei Gao <zhangfei.gao@linaro.org>
---
 Makefile.am      |   1 +
 include/wd_bmm.h |  66 +++++
 libwd.map        |  10 +
 wd_bmm.c         | 739 +++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 816 insertions(+)
 create mode 100644 include/wd_bmm.h
 create mode 100644 wd_bmm.c
diff --git a/Makefile.am b/Makefile.am
index c4b9c52..87b3811 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -60,6 +60,7 @@ libwd_la_SOURCES=wd.c wd_mempool.c wd.h wd_alg.c wd_alg.h \
 		v1/wd_bmm.c v1/wd_bmm.h \
 		v1/wd_ecc.c v1/wd_ecc.h \
 		v1/wd_sgl.c v1/wd_sgl.h \
+		wd_bmm.c \
 		aes.h sm4.h galois.h \
 		lib/crypto/aes.c lib/crypto/sm4.c lib/crypto/galois.c \
 		v1/drv/hisi_qm_udrv.c v1/drv/hisi_qm_udrv.h \
diff --git a/include/wd_bmm.h b/include/wd_bmm.h
new file mode 100644
index 0000000..15443fd
--- /dev/null
+++ b/include/wd_bmm.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright 2025-2026 Huawei Technologies Co.,Ltd. All rights reserved.
+ * Copyright 2025-2026 Linaro ltd.
+ */
+
+#ifndef _WD_BMM_H
+#define _WD_BMM_H
+
+#include <asm/types.h>
+#include "wd.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define DEFAULT_BLK_ALIGN 0x1000
+#define DEFAULT_BLOCK_NM 16384
+#define DEFAULT_ALIGN_SIZE 0x40
+
+/* the max sge num in one sgl */
+#define HISI_SGE_NUM_IN_SGL 255
+
+/* the max sgl num in one BD; the QM uses it as the sgl pool size */
+#define HISI_SGL_NUM_IN_BD 256
+
+/* memory APIs for the algorithm layer */
+typedef void *(*wd_alloc)(void *usr, size_t size);
+typedef void (*wd_free)(void *usr, void *va);
+
+/* map a memory VA to its DMA address */
+typedef void *(*wd_map)(void *usr, void *va, size_t sz);
+typedef __u32 (*wd_bufsize)(void *usr);
+
+/* Memory from the user, given at ctx creation. */
+struct wd_mm_br {
+	wd_alloc alloc;		/* memory allocation */
+	wd_free free;		/* memory free */
+	wd_map iova_map;	/* get iova from a user-space VA */
+	void *usr;		/* data for the above operations */
+	wd_bufsize get_bufsize;	/* optional */
+};
+
+/* Memory pool creation parameters */
+struct wd_blkpool_setup {
+	__u32 block_size;	/* block buffer size */
+	__u32 block_num;	/* block buffer number */
+	__u32 align_size;	/* block buffer starting address alignment */
+	struct wd_mm_br br;	/* memory from the user when not using WD memory */
+};
+
+
+void *wd_blkpool_new(handle_t h_ctx);
+void wd_blkpool_delete(void *pool);
+int wd_blkpool_setup(void *pool, struct wd_blkpool_setup *setup);
+void wd_blkpool_destroy_mem(void *pool);
+void *wd_blkpool_alloc(void *pool, size_t size);
+void wd_blkpool_free(void *pool, void *va);
+void *wd_blkpool_phy(void *pool, void *va);
+handle_t wd_blkpool_create_sglpool(void *pool, __u32 sgl_num, __u32 sge_num);
+void wd_blkpool_destroy_sglpool(void *pool, handle_t sgl_pool);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libwd.map b/libwd.map
index 5522ec0..e884671 100644
--- a/libwd.map
+++ b/libwd.map
@@ -49,5 +49,15 @@ global:
         wd_enable_drv;
         wd_disable_drv;
         wd_get_alg_head;
+
+        wd_blkpool_new;
+        wd_blkpool_delete;
+        wd_blkpool_phy;
+        wd_blkpool_alloc;
+        wd_blkpool_free;
+        wd_blkpool_setup;
+        wd_blkpool_destroy_mem;
+        wd_blkpool_create_sglpool;
+        wd_blkpool_destroy_sglpool;
 local: *;
 };
diff --git a/wd_bmm.c b/wd_bmm.c
new file mode 100644
index 0000000..c44c7d6
--- /dev/null
+++ b/wd_bmm.c
@@ -0,0 +1,739 @@
+// SPDX-License-Identifier: Apache-2.0
+/*
+ * Copyright 2025-2026 Huawei Technologies Co.,Ltd. All rights reserved.
+ * Copyright 2025-2026 Linaro ltd.
+ */
+
+/* Block Memory Management (lib): a block memory algorithm */
+#include <asm/byteorder.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <pthread.h>
+
+#include "wd.h"
+#include "wd_bmm.h"
+
+#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))
+#define ALIGN(x, a)		__ALIGN_MASK(x, (typeof(x))(a)-1)
+
+#define TAG_FREE	0x12345678	/* block is free */
+#define TAG_USED	0x87654321	/* block is busy */
+#define MAX_ALIGN_SIZE	0x1000		/* 4KB */
+#define MAX_BLOCK_SIZE	0x10000000	/* 256MB */
+#define BLK_BALANCE_SZ	0x100000ul
+#define NUM_TIMES(x)	(87 * (x) / 100)
+
+struct wd_blk_hd {
+	unsigned int blk_tag;
+	void *blk_dma;
+	void *blk;
+
+	TAILQ_ENTRY(wd_blk_hd) next;
+};
+
+TAILQ_HEAD(wd_blk_list, wd_blk_hd);
+
+struct wd_ss_region {
+	void *va;
+	unsigned long long pa;
+	size_t size;
+
+	TAILQ_ENTRY(wd_ss_region) next;
+};
+
+TAILQ_HEAD(wd_ss_region_list, wd_ss_region);
+
+struct wd_blkpool {
+	pthread_spinlock_t lock;
+	unsigned int free_blk_num;
+	unsigned int blk_num;
+	unsigned int alloc_failures;
+	struct wd_blk_list head;
+	void *act_start;
+	unsigned int hd_sz;
+	unsigned int blk_sz;
+	struct wd_blkpool_setup setup;
+
+	handle_t ctx;
+	void *mem;
+	unsigned long size;
+	struct wd_ss_region_list ss_list;
+	struct wd_ss_region_list *ss_head;
+};
+
+static struct wd_blk_hd *wd_blk_head(struct wd_blkpool *pool, void *blk)
+{
+	unsigned long offset = (unsigned long)((uintptr_t)blk -
+					       (uintptr_t)pool->act_start);
+	unsigned long sz = pool->hd_sz + pool->blk_sz;
+	unsigned long blk_idx = offset / sz;
+
+	/* header and block are laid out together; index back to the header */
+	return (struct wd_blk_hd *)((uintptr_t)pool->act_start + blk_idx * sz);
+}
+
+static int pool_params_check(struct wd_blkpool_setup *setup)
+{
+	if (!setup->block_size ||
+	    setup->block_size > MAX_BLOCK_SIZE) {
+		WD_ERR("Invalid block_size (%x)!\n",
+		       setup->block_size);
+		return -WD_EINVAL;
+	}
+
+	/* check parameters; align_size must be 2^N */
+	if (!setup->align_size || setup->align_size == 0x1 ||
+	    setup->align_size > MAX_ALIGN_SIZE ||
+	    setup->align_size & (setup->align_size - 0x1)) {
+		WD_ERR("Invalid align_size.\n");
+		return -WD_EINVAL;
+	}
+
+	return WD_SUCCESS;
+}
+
+#define WD_UACCE_GRAN_SIZE 0x10000ull
+static int wd_pool_pre_layout(struct wd_blkpool *p,
+			      struct wd_blkpool_setup *sp)
+{
+	unsigned int asz;
+	int ret;
+
+	ret = pool_params_check(sp);
+	if (ret)
+		return ret;
+
+	asz = sp->align_size;
+
+	/* get the actual values after alignment */
+	p->hd_sz = ALIGN(sizeof(struct wd_blk_hd), asz);
+	p->blk_sz = ALIGN(sp->block_size, asz);
+	if (p->size == 0 && !p->mem) {
+		p->size = (p->hd_sz + p->blk_sz) *
+			  (unsigned long)sp->block_num + asz;
+
+		/* make sure the size is aligned to the mmap granularity */
+		if (wd_is_noiommu(p->ctx))
+			p->size = ALIGN(p->size, WD_UACCE_GRAN_SIZE);
+	}
+
+	return WD_SUCCESS;
+}
+
+static void *wd_get_phys(struct wd_blkpool *pool, void *va)
+{
+	struct wd_ss_region *rgn;
+
+	TAILQ_FOREACH(rgn, pool->ss_head, next) {
+		if (rgn->va <= va && va < rgn->va + rgn->size)
+			return (void *)(uintptr_t)(rgn->pa +
+				((uintptr_t)va - (uintptr_t)rgn->va));
+	}
+
+	return NULL;
+}
+
+static int wd_pool_init(struct wd_blkpool *p)
+{
+	__u32 blk_size = p->setup.block_size;
+	void *dma_start, *dma_end, *va;
+	struct wd_blk_hd *hd = NULL;
+	unsigned int dma_num = 0;
+	unsigned int i, act_num;
+	unsigned long loss;
+
+	p->act_start = (void *)ALIGN((uintptr_t)p->mem,
+				     p->setup.align_size);
+	loss = p->act_start - p->mem;
+
+	act_num = (p->size - loss) / (p->hd_sz + p->blk_sz);
+
+	/* get the dma address and initialize the blocks */
+	for (i = 0; i < act_num; i++) {
+		va = (void *)((uintptr_t)p->act_start + p->hd_sz +
+			      (unsigned long)(p->hd_sz + p->blk_sz) * i);
+		dma_start = wd_get_phys(p, va);
+		dma_end = wd_get_phys(p, va + blk_size - 1);
+		if (!dma_start || !dma_end) {
+			WD_ERR("wd_get_phys err.\n");
+			return -WD_ENOMEM;
+		}
+
+		/* skip blocks that straddle a physical discontinuity */
+		if ((uintptr_t)dma_end - (uintptr_t)dma_start != blk_size - 1)
+			continue;
+
+		hd = (void *)((uintptr_t)va - p->hd_sz);
+		hd->blk_dma = dma_start;
+		hd->blk = va;
+		hd->blk_tag = TAG_FREE;
+		TAILQ_INSERT_TAIL(&p->head, hd, next);
+
+		dma_num++;
+	}
+
+	p->free_blk_num = dma_num;
+	p->blk_num = dma_num;
+
+	return WD_SUCCESS;
+}
+
+static int usr_pool_init(struct wd_blkpool *p)
+{
+	struct wd_blkpool_setup *sp = &p->setup;
+	__u32 blk_size = sp->block_size;
+	struct wd_blk_hd *hd = NULL;
+	unsigned int i, act_num;
+	unsigned long loss;
+
+	p->act_start = (void *)ALIGN((uintptr_t)p->mem,
+				     sp->align_size);
+	loss = p->act_start - p->mem;
+	act_num = (p->size - loss) / (p->hd_sz + p->blk_sz);
+
+	for (i = 0; i < act_num; i++) {
+		hd = (void *)((uintptr_t)p->act_start + (p->hd_sz + p->blk_sz) * i);
+		hd->blk = (void *)((uintptr_t)hd + p->hd_sz);
+		hd->blk_dma = sp->br.iova_map(sp->br.usr, hd->blk, blk_size);
+		if (!hd->blk_dma) {
+			WD_ERR("failed to map usr blk.\n");
+			return -WD_ENOMEM;
+		}
+		hd->blk_tag = TAG_FREE;
+		TAILQ_INSERT_TAIL(&p->head, hd, next);
+	}
+
+	p->free_blk_num = act_num;
+	p->blk_num = p->free_blk_num;
+
+	return WD_SUCCESS;
+}
+
+static void drv_free_slice(struct wd_blkpool *p)
+{
+	struct wd_ss_region *rgn;
+
+	while (true) {
+		rgn = TAILQ_FIRST(&p->ss_list);
+		if (!rgn)
+			break;
+		TAILQ_REMOVE(&p->ss_list, rgn, next);
+		free(rgn);
+	}
+}
+
+static void drv_add_slice(struct wd_blkpool *p, struct wd_ss_region *rgn)
+{
+	struct wd_ss_region *rg;
+
+	rg = TAILQ_LAST(&p->ss_list, wd_ss_region_list);
+	if (rg) {
+		/* merge with the previous region when physically contiguous */
+		if (rg->pa + rg->size == rgn->pa) {
+			rg->size += rgn->size;
+			free(rgn);
+			return;
+		}
+	}
+
+	TAILQ_INSERT_TAIL(&p->ss_list, rgn, next);
+}
+
+#define WD_UACCE_GRAN_SHIFT	16
+#define WD_UACCE_GRAN_NUM_MASK	0xfffull
+static void *pool_reserve_mem(struct wd_blkpool *p, size_t size)
+{
+	struct wd_ss_region *rgn = NULL;
+	unsigned long info = 0;
+	unsigned long i = 0;
+	void *ptr = NULL;
+	size_t tmp = 0;
+	int ret = 1;
+
+	if (!p->ctx)
+		return NULL;
+
+	if (p->mem)
+		return NULL;
+
+	ptr = wd_reserve_mem(p->ctx, size);
+	if (!ptr)
+		return NULL;
+
+	p->ss_head = &p->ss_list;
+	TAILQ_INIT(&p->ss_list);
+
+	while (ret > 0) {
+		info = i;
+		ret = wd_ctx_set_io_cmd(p->ctx, UACCE_CMD_GET_SS_DMA, &info);
+		if (ret < 0) {
+			WD_ERR("get DMA fail!\n");
+			goto err_out;
+		}
+		rgn = malloc(sizeof(*rgn));
+		if (!rgn) {
+			WD_ERR("alloc ss region fail!\n");
+			goto err_out;
+		}
+		memset(rgn, 0, sizeof(*rgn));
+
+		if (wd_is_noiommu(p->ctx))
+			rgn->size = (info & WD_UACCE_GRAN_NUM_MASK) <<
+				    WD_UACCE_GRAN_SHIFT;
+		else
+			rgn->size = p->size;
+		rgn->pa = info & (~WD_UACCE_GRAN_NUM_MASK);
+		rgn->va = ptr + tmp;
+		tmp += rgn->size;
+		drv_add_slice(p, rgn);
+		i++;
+	}
+
+	return ptr;
+
+err_out:
+	drv_free_slice(p);
+	munmap(ptr, size);
+
+	return NULL;
+}
+
+static int pool_init(struct wd_blkpool *pool,
+		     struct wd_blkpool_setup *setup)
+{
+	void *addr = NULL;
+
+	/* use the user's memory and its br alloc function */
+	if (setup->br.alloc && setup->br.free) {
+		if (!pool->mem) {
+			addr = setup->br.alloc(setup->br.usr, pool->size);
+			if (!addr) {
+				WD_ERR("failed to allocate memory in user pool.\n");
+				return -EINVAL;
+			}
+			pool->mem = addr;
+		}
+		if (usr_pool_init(pool)) {
+			WD_ERR("failed to initialize user pool.\n");
+			setup->br.free(setup->br.usr, pool->mem);
+			pool->mem = NULL;
+			return -EINVAL;
+		}
+	} else {
+		if (!pool->mem) {
+			/* use wd to reserve memory */
+			addr = pool_reserve_mem(pool, pool->size);
+			if (!addr) {
+				WD_ERR("wd pool failed to reserve memory.\n");
+				return -EINVAL;
+			}
+			pool->mem = addr;
+		}
+
+		if (wd_pool_init(pool)) {
+			WD_ERR("failed to initialize wd pool.\n");
+			wd_blkpool_destroy_mem(pool);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+void *wd_blkpool_new(handle_t h_ctx)
+{
+	struct wd_blkpool *pool;
+
+	if (wd_is_sva(h_ctx))
+		return NULL;
+
+	pool = calloc(1, sizeof(*pool));
+	if (!pool) {
+		WD_ERR("failed to malloc pool.\n");
+		return NULL;
+	}
+	pool->ctx = h_ctx;
+
+	if (pthread_spin_init(&pool->lock, PTHREAD_PROCESS_SHARED) != 0) {
+		free(pool);
+		return NULL;
+	}
+
+	return pool;
+}
+
+int wd_blkpool_setup(void *pool, struct wd_blkpool_setup *setup)
+{
+	struct wd_blkpool *p = pool;
+	int ret = 0;
+
+	if (!p || !setup)
+		return -EINVAL;
+
+	pthread_spin_lock(&p->lock);
+	if (p->mem && p->size != 0) {
+		/* same effective block size as before: nothing to do */
+		if (p->setup.block_size == setup->block_size ||
+		    p->blk_sz == ALIGN(setup->block_size, setup->align_size))
+			goto out;
+
+		/* re-org the block size; the reserved memory is reused */
+		if (p->free_blk_num != p->blk_num) {
+			WD_ERR("Cannot reset blk pool while it is in use.\n");
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	memcpy(&p->setup, setup, sizeof(p->setup));
+
+	ret = wd_pool_pre_layout(p, setup);
+	if (ret)
+		goto out;
+
+	TAILQ_INIT(&p->head);
+
+	ret = pool_init(p, setup);
+
+out:
+	pthread_spin_unlock(&p->lock);
+	return ret;
+}
+
+void *wd_blkpool_alloc(void *pool, size_t size)
+{
+	struct wd_blkpool *p = pool;
+	struct wd_blk_hd *hd;
+	int ret;
+
+	if (unlikely(!p)) {
+		WD_ERR("blk alloc pool is null!\n");
+		return NULL;
+	}
+
+	if (!p->mem || size > p->blk_sz) {
+		struct wd_blkpool_setup setup;
+
+		/*
+		 * If the pool is empty, reserve memory and initialize it;
+		 * if size > blk_size and the whole pool is free, re-org
+		 * the blocks with a 4K-aligned block size.
+		 */
+		memset(&setup, 0, sizeof(setup));
+		setup.block_size = ALIGN(size, DEFAULT_BLK_ALIGN);
+		setup.block_num = DEFAULT_BLOCK_NM;
+		setup.align_size = DEFAULT_ALIGN_SIZE;
+		ret = wd_blkpool_setup(p, &setup);
+		if (ret)
+			return NULL;
+	}
+
+	pthread_spin_lock(&p->lock);
+	hd = TAILQ_LAST(&p->head, wd_blk_list);
+	if (unlikely(!hd || hd->blk_tag != TAG_FREE)) {
+		p->alloc_failures++;
+		goto out;
+	}
+
+	/* delete the block buffer from the free list */
+	TAILQ_REMOVE(&p->head, hd, next);
+	p->free_blk_num--;
+	hd->blk_tag = TAG_USED;
+	pthread_spin_unlock(&p->lock);
+
+	return hd->blk;
+
+out:
+	pthread_spin_unlock(&p->lock);
+	WD_ERR("Failed to malloc blk.\n");
+
+	return NULL;
+}
+
+void wd_blkpool_free(void *pool, void *va)
+{
+	struct wd_blkpool *p = pool;
+	struct wd_blk_hd *hd;
+
+	if (unlikely(!p || !va)) {
+		WD_ERR("free blk parameters err!\n");
+		return;
+	}
+
+	hd = wd_blk_head(p, va);
+	if (unlikely(hd->blk_tag != TAG_USED)) {
+		WD_ERR("free block fail!\n");
+		return;
+	}
+
+	pthread_spin_lock(&p->lock);
+	TAILQ_INSERT_TAIL(&p->head, hd, next);
+	p->free_blk_num++;
+	hd->blk_tag = TAG_FREE;
+	pthread_spin_unlock(&p->lock);
+}
+
+void *wd_blkpool_phy(void *pool, void *va)
+{
+	struct wd_blk_hd *hd;
+
+	if (unlikely(!pool || !va)) {
+		WD_ERR("blk map err, pool or va is NULL!\n");
+		return NULL;
+	}
+
+	hd = wd_blk_head(pool, va);
+	if (unlikely(hd->blk_tag != TAG_USED ||
+		     (uintptr_t)va < (uintptr_t)hd->blk)) {
+		WD_ERR("dma map fail!\n");
+		return NULL;
+	}
+
+	return (void *)((uintptr_t)hd->blk_dma + ((uintptr_t)va -
+			(uintptr_t)hd->blk));
+}
+
+/* not used for now */
+void *wd_blkpool_va(void *pool, void *pa)
+{
+	struct wd_blkpool *p = pool;
+	struct wd_blk_hd *hd;
+
+	if (unlikely(!pool || !pa)) {
+		WD_ERR("blk map err, pool or pa is NULL!\n");
+		return NULL;
+	}
+
+	TAILQ_FOREACH(hd, &p->head, next) {
+		if ((uintptr_t)pa >= (uintptr_t)hd->blk_dma &&
+		    (uintptr_t)pa < (uintptr_t)hd->blk_dma + p->blk_sz) {
+			return (void *)((uintptr_t)hd->blk +
+				((uintptr_t)pa - (uintptr_t)hd->blk_dma));
+		}
+	}
+
+	return NULL;
+}
+
+int wd_blkpool_get_free_blk_num(void *pool, __u32 *free_num)
+{
+	struct wd_blkpool *p = pool;
+
+	if (!p || !free_num) {
+		WD_ERR("get_free_blk_num err, parameter err!\n");
+		return -WD_EINVAL;
+	}
+
+	*free_num = __atomic_load_n(&p->free_blk_num, __ATOMIC_RELAXED);
+
+	return WD_SUCCESS;
+}
+
+int wd_blkpool_alloc_failures(void *pool, __u32 *fail_num)
+{
+	struct wd_blkpool *p = pool;
+
+	if (!p || !fail_num) {
+		WD_ERR("get_blk_alloc_failure err, parameter err!\n");
+		return -WD_EINVAL;
+	}
+
+	*fail_num = __atomic_load_n(&p->alloc_failures, __ATOMIC_RELAXED);
+
+	return WD_SUCCESS;
+}
+
+__u32 wd_blkpool_blksize(void *pool)
+{
+	struct wd_blkpool *p = pool;
+
+	if (!p) {
+		WD_ERR("get blk_size pool is null!\n");
+		return 0;
+	}
+
+	return p->blk_sz;
+}
+
+void wd_blkpool_destroy_mem(void *pool)
+{
+	struct wd_blkpool_setup *setup;
+	struct wd_blkpool *p = pool;
+
+	if (!p) {
+		WD_ERR("pool destroy err, pool is NULL.\n");
+		return;
+	}
+
+	pthread_spin_lock(&p->lock);
+	if (p->mem) {
+		setup = &p->setup;
+		if (setup->br.free) {
+			setup->br.free(setup->br.usr, p->mem);
+		} else {
+			drv_free_slice(p);
+			munmap(p->mem, p->size);
+		}
+		p->mem = NULL;
+		p->size = 0;
+	}
+	pthread_spin_unlock(&p->lock);
+}
+
+void wd_blkpool_delete(void *pool)
+{
+	struct wd_blkpool *p = pool;
+
+	if (!p)
+		return;
+
+	wd_blkpool_destroy_mem(pool);
+	pthread_spin_destroy(&p->lock);
+	free(p);
+}
+
+struct hisi_sge {
+	uintptr_t buff;
+	void *page_ctrl;
+	__le32 len;
+	__le32 pad;
+	__le32 pad0;
+	__le32 pad1;
+};
+
+/* use the default hw sgl head size of 64B, in little-endian */
+struct hisi_sgl {
+	/* the next sgl address */
+	uintptr_t next_dma;
+	/* the sge num of all the sgls in the chain */
+	__le16 entry_sum_in_chain;
+	/* valid sge (has buff) num in this sgl */
+	__le16 entry_sum_in_sgl;
+	/* the sge num in this sgl */
+	__le16 entry_length_in_sgl;
+	__le16 pad0;
+	__le64 pad1[5];
+	/* valid sge buffs total size */
+	__le64 entry_size_in_sgl;
+	struct hisi_sge sge_entries[];
+};
+
+struct hisi_sgl_pool {
+	/* the 64B-aligned sgl addresses handed to hardware */
+	void **sgl_align;
+	/* the sgl src address array */
+	void **sgl;
+	/* the sgl pool stack depth */
+	__u32 depth;
+	__u32 top;
+	__u32 sge_num;
+	__u32 sgl_num;
+	pthread_spinlock_t lock;
+	__u32 blk_num;
+	void **blks;
+};
+
+#define ADDR_ALIGN_64(addr) (((uintptr_t)(addr) + 63) & ~63)
+handle_t wd_blkpool_create_sglpool(void *pool, __u32 sgl_num, __u32 sge_num)
+{
+	struct hisi_sgl_pool *sgl_pool;
+	struct wd_blkpool *p = pool;
+	struct hisi_sgl *sgl_align;
+	int sgl_size, size, i, j, blk_num;
+	__u32 num = 0, num_per_blk;
+	void *base;
+
+	if (!pool || !sge_num || sge_num > HISI_SGE_NUM_IN_SGL ||
+	    !sgl_num || sgl_num > HISI_SGL_NUM_IN_BD) {
+		WD_ERR("failed to create sgl_pool, sgl_num=%u, sge_num=%u!\n",
+		       sgl_num, sge_num);
+		return 0;
+	}
+
+	sgl_pool = calloc(1, sizeof(struct hisi_sgl_pool));
+	if (!sgl_pool) {
+		WD_ERR("failed to alloc memory for sgl_pool!\n");
+		return 0;
+	}
+
+	sgl_pool->sgl_align = calloc(sgl_num, sizeof(void *));
+	if (!sgl_pool->sgl_align) {
+		WD_ERR("failed to alloc memory for sgl align!\n");
+		goto err_out;
+	}
+
+	sgl_size = sizeof(struct hisi_sgl) + sge_num * sizeof(struct hisi_sge);
+	sgl_size = ALIGN(sgl_size, 64); /* 64 bytes aligned */
+	num_per_blk = p->blk_sz / sgl_size;
+	/* an empty pool is initialized by the first wd_blkpool_alloc() */
+	if (!num_per_blk)
+		num_per_blk = 1;
+	size = num_per_blk * sgl_size;
+	blk_num = (sgl_num + num_per_blk - 1) / num_per_blk;
+	sgl_pool->blk_num = blk_num;
+
+	sgl_pool->blks = calloc(blk_num, sizeof(void *));
+	if (!sgl_pool->blks) {
+		WD_ERR("failed to alloc memory for sgl blks!\n");
+		goto err_out;
+	}
+
+	for (i = 0; i < blk_num; i++) {
+		base = wd_blkpool_alloc(p, size);
+		if (!base) {
+			WD_ERR("blk_pool_alloc failed!\n");
+			goto err_out;
+		}
+		sgl_pool->blks[i] = base;
+
+		for (j = 0; j < num_per_blk; j++) {
+			sgl_align = (struct hisi_sgl *)ADDR_ALIGN_64(base + sgl_size * j);
+			sgl_align->entry_sum_in_chain = sge_num;
+			sgl_align->entry_sum_in_sgl = 0;
+			sgl_align->entry_length_in_sgl = sge_num;
+			sgl_align->next_dma = 0;
+			sgl_pool->sgl_align[num] = sgl_align;
+			if (++num == sgl_num)
+				break;
+		}
+	}
+
+	if (pthread_spin_init(&sgl_pool->lock, PTHREAD_PROCESS_SHARED) != 0) {
+		WD_ERR("failed to init sgl pool lock!\n");
+		goto err_out;
+	}
+
+	sgl_pool->sgl_num = sgl_num;
+	sgl_pool->sge_num = sge_num;
+	sgl_pool->depth = sgl_num;
+	sgl_pool->top = sgl_num;
+
+	return (handle_t)sgl_pool;
+
+err_out:
+	if (sgl_pool->blks) {
+		for (i = 0; i < sgl_pool->blk_num; i++)
+			if (sgl_pool->blks[i])
+				wd_blkpool_free(p, sgl_pool->blks[i]);
+		free(sgl_pool->blks);
+	}
+	if (sgl_pool->sgl_align)
+		free(sgl_pool->sgl_align);
+	free(sgl_pool);
+
+	return (handle_t)0;
+}
+
+void wd_blkpool_destroy_sglpool(void *pool, handle_t h_sgl_pool)
+{
+	struct hisi_sgl_pool *sgl_pool = (struct hisi_sgl_pool *)h_sgl_pool;
+	struct wd_blkpool *p = pool;
+	int i;
+
+	if (!h_sgl_pool || !pool)
+		return;
+
+	pthread_spin_destroy(&sgl_pool->lock);
+	if (sgl_pool->blks) {
+		for (i = 0; i < sgl_pool->blk_num; i++)
+			wd_blkpool_free(p, sgl_pool->blks[i]);
+		free(sgl_pool->blks);
+	}
+	if (sgl_pool->sgl_align)
+		free(sgl_pool->sgl_align);
+	free(sgl_pool);
+}