path: root/src/framework/common/mem_mgr/nsfw_nshmem
author    qchang <qing.chang1@huawei.com>  2018-03-08 17:39:22 -0800
committer qchang <qing.chang1@huawei.com>  2018-03-08 17:39:22 -0800
commit    697ade6190b23c80e7f60963983786e679759393 (patch)
tree      dd9782d1e936b8342163b26795e23571d4b1b415 /src/framework/common/mem_mgr/nsfw_nshmem
parent    71a4e2f34afa8018426f0e830050e50a1de6d375 (diff)
dmm initial commit
Change-Id: I049ee277cf4efdb83f9c2ac439365fcd421c159b
Signed-off-by: qchang <qing.chang1@huawei.com>
Diffstat (limited to 'src/framework/common/mem_mgr/nsfw_nshmem')
-rw-r--r--  src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mdesc.c   47
-rw-r--r--  src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mdesc.h   22
-rw-r--r--  src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mng.c    544
-rw-r--r--  src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mng.h     70
-rw-r--r--  src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_ring.c   436
-rw-r--r--  src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_ring.h    37
6 files changed, 1156 insertions, 0 deletions
diff --git a/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mdesc.c b/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mdesc.c
new file mode 100644
index 0000000..c78c27e
--- /dev/null
+++ b/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mdesc.c
@@ -0,0 +1,47 @@
+/*
+*
+* Copyright (c) 2018 Huawei Technologies Co.,Ltd.
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at:
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "nsfw_mem_desc.h"
+#include "nsfw_nshmem_mng.h"
+#include "nsfw_nshmem_mdesc.h"
+
+/* non-shared memory access interface */
+nsfw_mem_ops g_nshmem_ops = {
+ nsfw_nshmem_init,
+ nsfw_nshmem_destory,
+ nsfw_nshmem_create,
+ NULL,
+ nsfw_nshmem_lookup,
+ nsfw_nshmem_release,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ nsfw_nshmem_spcreate,
+ NULL,
+ NULL,
+ nsfw_nshmem_sprelease,
+ nsfw_nshmem_sp_lookup,
+ nsfw_nshmem_ringcreate,
+ NULL,
+ nsfw_nshmem_ringrelease,
+ nsfw_nshmem_stactic,
+ NULL,
+ NULL, /*mem_ops_sp_iterator */
+ NULL, /*mem_ops_mbuf_iterator */
+};
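
The table above wires the non-shared-memory backend into the framework's nsfw_mem_ops callback slots (declared in nsfw_mem_desc.h); NULL entries mark operations this backend does not provide. As a rough sketch of how such a table might be consumed, the wrapper below dispatches zone creation through an ops pointer. The member names mem_ops_zone_create and the wrapper itself are assumptions for illustration only; the real slot names live in nsfw_mem_desc.h, which this diff does not show.

    /* Illustrative sketch only: the nsfw_mem_ops member name used here
     * (mem_ops_zone_create) is assumed, not taken from nsfw_mem_desc.h. */
    #include "nsfw_mem_desc.h"
    #include "nsfw_nshmem_mdesc.h"

    static mzone_handle
    sketch_zone_create_via_ops (nsfw_mem_ops * ops, nsfw_mem_zone * info)
    {
      /* a backend may leave unsupported slots NULL, so check first */
      if (NULL == ops || NULL == ops->mem_ops_zone_create)
        {
          return NULL;
        }
      return ops->mem_ops_zone_create (info);
    }

With g_nshmem_ops, such a dispatch would land in nsfw_nshmem_create, defined in nsfw_nshmem_mng.c below.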
diff --git a/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mdesc.h b/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mdesc.h
new file mode 100644
index 0000000..1b63520
--- /dev/null
+++ b/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mdesc.h
@@ -0,0 +1,22 @@
+/*
+*
+* Copyright (c) 2018 Huawei Technologies Co.,Ltd.
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at:
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef _NSFW_NSHMEM_MDESC_H
+#define _NSFW_NSHMEM_MDESC_H
+
+extern nsfw_mem_ops g_nshmem_ops;
+
+#endif
diff --git a/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mng.c b/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mng.c
new file mode 100644
index 0000000..d5661fd
--- /dev/null
+++ b/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mng.c
@@ -0,0 +1,544 @@
+/*
+*
+* Copyright (c) 2018 Huawei Technologies Co.,Ltd.
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at:
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <stdlib.h>
+#include "nstack_log.h"
+#include "nstack_securec.h"
+#include "nsfw_mem_desc.h"
+#include "nsfw_ring_fun.h"
+#include "nsfw_nshmem_ring.h"
+#include "nsfw_nshmem_mng.h"
+
+#include "common_func.h"
+
+#define nsfw_get_glb_lock() (&g_nshmem_internal_cfg->mlock)
+
+#define NSFW_NSHMEM_INIT_CHK_RET_NULL() \
+ if ((!g_nshmem_internal_cfg) || (!g_nshmem_localdata)) \
+ { \
+ NSCOMM_LOGDBG("Error] g_nshmem_internal_cfg=%p, g_nshmem_localdata=%p", g_nshmem_internal_cfg, g_nshmem_localdata); \
+ return NULL; \
+ }
+
+#define NSFW_NSHMEM_INIT_CHK_RET() \
+ if ((!g_nshmem_internal_cfg) || (!g_nshmem_localdata)) \
+ { \
+ NSCOMM_LOGDBG("Error] g_nshmem_internal_cfg=%p, g_nshmem_localdata=%p", g_nshmem_internal_cfg, g_nshmem_localdata); \
+ return NSFW_MEM_ERR; \
+ }
+
+nsfw_mem_localdata *g_nshmem_localdata = NULL;
+nsfw_nshmem_cfg *g_nshmem_internal_cfg = NULL;
+
+/*look up a mem zone*/
+NSTACK_STATIC inline nsfw_nshmem_mzone *
+nsfw_nshmem_get_free_zone (void)
+{
+ int icnt = 0;
+
+  /* g_nshmem_internal_cfg must not be NULL if we get here */
+  for (icnt = 0; icnt < NSFW_NSHMEM_ZONE_MAX; icnt++)
+ {
+ if (g_nshmem_internal_cfg->amemzone[icnt].addr == NULL)
+ {
+ return &g_nshmem_internal_cfg->amemzone[icnt];
+ }
+ }
+
+ return NULL;
+}
+
+NSTACK_STATIC inline void
+nsfw_nshmem_free_zone (nsfw_nshmem_mzone * pzone)
+{
+ nsfw_nshmem_mzone *pzonebase = &g_nshmem_internal_cfg->amemzone[0];
+ nsfw_nshmem_mzone *pzoneend =
+ &g_nshmem_internal_cfg->amemzone[NSFW_NSHMEM_ZONE_MAX - 1];
+
+ if ((((int) ((char *) pzone - (char *) pzonebase) < 0)
+ || ((int) ((char *) pzone - (char *) pzoneend) > 0))
+      || ((unsigned int) ((char *) pzone - (char *) pzonebase) %
+ sizeof (nsfw_nshmem_mzone) != 0))
+ {
+ NSCOMM_LOGERR ("nshmem free fail] mem=%p", pzone);
+ return;
+ }
+ if (pzone->addr)
+ {
+ free (pzone->addr);
+ }
+ pzone->addr = NULL;
+
+ int ret = MEMSET_S ((void *) pzone, sizeof (nsfw_nshmem_mzone), 0,
+ sizeof (nsfw_nshmem_mzone));
+ if (EOK != ret)
+ {
+ NSCOMM_LOGERR ("MEMSET_S failed] mem=%p, ret=%d", pzone, ret);
+ }
+ return;
+}
+
+/*****************************************************************************
+* Prototype : nsfw_nshmem_init
+* Description  : non-shared memory module init
+* Input : nsfw_mem_para* para
+* Output : None
+* Return Value : i32
+* Calls :
+* Called By :
+*****************************************************************************/
+i32
+nsfw_nshmem_init (nsfw_mem_para * para)
+{
+ i32 iret = NSFW_MEM_OK;
+ NSCOMM_LOGINF ("nsfw nshmem init begin");
+ g_nshmem_localdata =
+ (nsfw_mem_localdata *) malloc (sizeof (nsfw_mem_localdata));
+
+ if (NULL == g_nshmem_localdata)
+ {
+ NSCOMM_LOGERR ("nshmem init g_nshmem_localdata malloc fail");
+ return NSFW_MEM_ERR;
+ }
+
+ iret =
+ MEMSET_S (g_nshmem_localdata, sizeof (nsfw_mem_localdata), 0,
+ sizeof (nsfw_mem_localdata));
+
+ if (EOK != iret)
+ {
+ NSCOMM_LOGERR ("nshmem init g_nshmem_localdata MEMSET_S fail");
+ goto ERROR;
+ }
+
+ g_nshmem_internal_cfg =
+ (nsfw_nshmem_cfg *) malloc (sizeof (nsfw_nshmem_cfg));
+
+ if (NULL == g_nshmem_internal_cfg)
+ {
+ NSCOMM_LOGERR ("nshmem init g_nshmem_internal_cfg malloc fail");
+ goto ERROR;
+ }
+
+ iret =
+ MEMSET_S (g_nshmem_internal_cfg, sizeof (nsfw_nshmem_cfg), 0,
+ sizeof (nsfw_nshmem_cfg));
+
+ if (EOK != iret)
+ {
+ NSCOMM_LOGERR ("nshmem init g_nshmem_internal_cfg MEMSET_S fail");
+ goto ERROR;
+ }
+
+ g_nshmem_localdata->enflag = para->enflag;
+ NSCOMM_LOGINF ("nsfw nshmem init end");
+ goto OK;
+
+ERROR:
+ iret = NSFW_MEM_ERR;
+ nsfw_nshmem_destory ();
+ return iret;
+OK:
+ iret = NSFW_MEM_OK;
+ return iret;
+}
+
+/*
+ * memory destroy
+ */
+void
+nsfw_nshmem_destory (void)
+{
+ if (g_nshmem_localdata)
+ {
+ free (g_nshmem_localdata);
+ g_nshmem_localdata = NULL;
+ }
+
+ if (g_nshmem_internal_cfg)
+ {
+ free (g_nshmem_internal_cfg);
+ g_nshmem_internal_cfg = NULL;
+ }
+
+ return;
+}
+
+/*****************************************************************************
+* Prototype : nsfw_nshmem_reserv_safe
+* Description  : allocate memory and record it in a free memzone slot
+* Input : const char* name
+* size_t lenth
+* Output : None
+* Return Value : mzone_handle
+* Calls :
+* Called By :
+*
+*****************************************************************************/
+mzone_handle
+nsfw_nshmem_reserv_safe (const char *name, size_t lenth)
+{
+ void *addr = NULL;
+ i32 iret = NSFW_MEM_OK;
+ nsfw_nshmem_mzone *pmemzone = NULL;
+
+ if (lenth <= 0)
+ {
+ return NULL;
+ }
+
+ nsfw_write_lock (nsfw_get_glb_lock ());
+
+ addr = malloc (lenth);
+ if (!addr)
+ {
+ NSCOMM_LOGERR ("nshmem malloc addr fail] addr=%p", addr);
+ nsfw_write_unlock (nsfw_get_glb_lock ());
+ return NULL;
+ }
+
+ iret = MEMSET_S (addr, lenth, 0, lenth);
+ if (EOK != iret)
+ {
+ NSCOMM_LOGERR ("nshmem malloc addr MEMSET_S fail] addr=%p", addr);
+ free (addr);
+ nsfw_write_unlock (nsfw_get_glb_lock ());
+ return NULL;
+ }
+
+ pmemzone = nsfw_nshmem_get_free_zone ();
+
+ if (!pmemzone)
+ {
+ NSCOMM_LOGERR ("nshmem get free zone fail");
+ free (addr);
+ nsfw_write_unlock (nsfw_get_glb_lock ());
+ return NULL;
+ }
+
+ pmemzone->addr = addr;
+ pmemzone->lenth = lenth;
+  /* the name must fit in pmemzone->aname (NSFW_MEM_NAME_LENTH bytes) */
+ if (EOK !=
+ STRCPY_S ((char *) pmemzone->aname, sizeof (pmemzone->aname), name))
+ {
+ NSCOMM_LOGERR ("STRCPY_S failed]name=%s", name);
+ free (addr);
+ nsfw_write_unlock (nsfw_get_glb_lock ());
+ return NULL;
+ }
+
+ nsfw_write_unlock (nsfw_get_glb_lock ());
+ return addr;
+}
+
+/*
+ * create non-shared memory
+ * nsfw_mem_zone::stname  non-shared memory name
+ * nsfw_mem_zone::lenth   memory size
+ */
+mzone_handle
+nsfw_nshmem_create (nsfw_mem_zone * pinfo)
+{
+
+ NSFW_NAME_LENCHECK_RET_NULL (pinfo->stname.aname, "nshmem create");
+ NSFW_NSHMEM_INIT_CHK_RET_NULL ();
+ return nsfw_nshmem_reserv_safe (pinfo->stname.aname, pinfo->lenth);
+}
+
+/*****************************************************************************
+* Prototype : nsfw_nshmem_lookup
+* Description  : find a memory block by name
+* Input : nsfw_mem_name* pname
+* Output : None
+* Return Value : mzone_handle
+* Calls :
+* Called By :
+*
+*****************************************************************************/
+mzone_handle
+nsfw_nshmem_lookup (nsfw_mem_name * pname)
+{
+ int icnt = 0;
+ nsfw_nshmem_mzone *mz = NULL;
+
+ NSFW_NAME_LENCHECK_RET_NULL (pname->aname, "nshmem lookup");
+ NSFW_NSHMEM_INIT_CHK_RET_NULL ();
+ nsfw_read_lock (nsfw_get_glb_lock ());
+
+ for (icnt = 0; icnt < NSFW_NSHMEM_ZONE_MAX; icnt++)
+ {
+ mz = &g_nshmem_internal_cfg->amemzone[icnt];
+
+ if (mz->addr != NULL
+ && !strncmp (pname->aname, mz->aname, NSFW_MEM_NAME_LENTH))
+ {
+ nsfw_read_unlock (nsfw_get_glb_lock ());
+ return mz->addr;
+ }
+ }
+
+ nsfw_read_unlock (nsfw_get_glb_lock ());
+ return NULL;
+}
+
+/*****************************************************************************
+* Prototype : nsfw_nshmem_release
+* Description  : free a memory block
+* Input : nsfw_mem_name* pname
+* Output : None
+* Return Value : i32
+* Calls :
+* Called By :
+*
+*****************************************************************************/
+i32
+nsfw_nshmem_release (nsfw_mem_name * pname)
+{
+ int icnt = 0;
+ nsfw_nshmem_mzone *mz = NULL;
+
+ NSFW_NAME_LENCHECK_RET (pname->aname, "nshmem release");
+ NSFW_NSHMEM_INIT_CHK_RET ();
+ nsfw_read_lock (nsfw_get_glb_lock ());
+
+ for (icnt = 0; icnt < NSFW_NSHMEM_ZONE_MAX; icnt++)
+ {
+ mz = &g_nshmem_internal_cfg->amemzone[icnt];
+
+ if (mz->addr != NULL
+ && !strncmp (pname->aname, mz->aname, NSFW_MEM_NAME_LENTH))
+ {
+ nsfw_nshmem_free_zone (mz);
+ nsfw_read_unlock (nsfw_get_glb_lock ());
+ return NSFW_MEM_OK;
+ }
+ }
+
+ nsfw_read_unlock (nsfw_get_glb_lock ());
+ return NSFW_MEM_OK;
+
+}
+
+/*****************************************************************************
+* Prototype : nsfw_nshmem_spcreate
+* Description  : create a memory pool backed by a ring
+* Input : nsfw_mem_sppool* pmpinfo
+* Output : None
+* Return Value : mring_handle
+* Calls :
+* Called By :
+*****************************************************************************/
+mring_handle
+nsfw_nshmem_spcreate (nsfw_mem_sppool * pmpinfo)
+{
+ size_t len = 0;
+ unsigned int usnum = common_mem_align32pow2 (pmpinfo->usnum + 1);
+ unsigned int uselt_size = pmpinfo->useltsize;
+ struct nsfw_mem_ring *pringhead = NULL;
+ unsigned int uscnt = 0;
+ char *pmz = NULL;
+ NSFW_NAME_LENCHECK_RET_NULL (pmpinfo->stname.aname, "nshmem sp create");
+ NSFW_NSHMEM_INIT_CHK_RET_NULL ();
+
+ len =
+ sizeof (struct nsfw_mem_ring) +
+    (size_t) usnum * sizeof (union RingData_U) + (size_t) usnum * uselt_size;
+ pringhead =
+ (struct nsfw_mem_ring *) nsfw_nshmem_reserv_safe (pmpinfo->stname.aname,
+ len);
+
+ if (!pringhead)
+ {
+ NSCOMM_LOGERR ("nshmem sp create mzone reserv fail");
+ return NULL;
+ }
+
+ nsfw_mem_ring_init (pringhead, usnum, pringhead, NSFW_NSHMEM,
+ pmpinfo->enmptype);
+ pmz =
+ ((char *) pringhead + sizeof (struct nsfw_mem_ring) +
+ usnum * sizeof (union RingData_U));
+
+ for (uscnt = 0; uscnt < usnum; uscnt++)
+ {
+      /* hand this element to the pool's backing ring */
+      if (0 ==
+          g_ring_ops_arry[pringhead->memtype][pringhead->ringflag].
+          ring_ops_enqueue (pringhead, (void *) pmz))
+ {
+ NSCOMM_LOGERR ("nsfw_nshmem_ringenqueue enque fail] uscnt=%u",
+ uscnt);
+ }
+
+ pmz = pmz + uselt_size;
+ }
+
+ return pringhead;
+}
+
+/*****************************************************************************
+* Prototype : nsfw_nshmem_sp_lookup
+* Description  : look up an sppool by name
+* Input : nsfw_mem_name* pname
+* Output : None
+* Return Value : mring_handle
+* Calls :
+* Called By :
+*****************************************************************************/
+mring_handle
+nsfw_nshmem_sp_lookup (nsfw_mem_name * pname)
+{
+ return nsfw_nshmem_lookup (pname);
+}
+
+/*****************************************************************************
+* Prototype : nsfw_nshmem_sprelease
+* Description : release a sp pool
+* Input : nsfw_mem_name* pname
+* Output : None
+* Return Value : i32
+* Calls :
+* Called By :
+*
+*****************************************************************************/
+i32
+nsfw_nshmem_sprelease (nsfw_mem_name * pname)
+{
+ NSFW_NAME_LENCHECK_RET (pname->aname, "nshmem sp mempool release");
+ NSFW_NSHMEM_INIT_CHK_RET ();
+ return nsfw_nshmem_release (pname);
+}
+
+/*****************************************************************************
+* Prototype : nsfw_nshmem_ringcreate
+* Description : create a ring
+* Input : nsfw_mem_mring* pringinfo
+* Output : None
+* Return Value : mring_handle
+* Calls :
+* Called By :
+*
+*****************************************************************************/
+mring_handle
+nsfw_nshmem_ringcreate (nsfw_mem_mring * pringinfo)
+{
+ size_t len = 0;
+ unsigned int usnum = common_mem_align32pow2 (pringinfo->usnum + 1);
+ struct nsfw_mem_ring *pringhead = NULL;
+ NSFW_NAME_LENCHECK_RET_NULL (pringinfo->stname.aname, "nshmem ring create");
+ NSFW_NSHMEM_INIT_CHK_RET_NULL ();
+
+ len = sizeof (struct nsfw_mem_ring) + usnum * sizeof (union RingData_U);
+ pringhead =
+ (struct nsfw_mem_ring *) nsfw_nshmem_reserv_safe (pringinfo->stname.aname,
+ len);
+
+ if (!pringhead)
+ {
+ NSCOMM_LOGERR ("nshmem ring create mzone reserv fail");
+ return NULL;
+ }
+
+ nsfw_mem_ring_init (pringhead, usnum, (void *) pringhead, NSFW_NSHMEM,
+ pringinfo->enmptype);
+ return pringhead;
+
+}
+
+/*****************************************************************************
+* Prototype : nsfw_nshmem_ringrelease
+* Description : release a nsh ring memory
+* Input : nsfw_mem_name* pname
+* Output : None
+* Return Value : i32
+* Calls :
+* Called By :
+*
+*****************************************************************************/
+i32
+nsfw_nshmem_ringrelease (nsfw_mem_name * pname)
+{
+ NSFW_NAME_LENCHECK_RET (pname->aname, "nshmem ring mempool release");
+ NSFW_NSHMEM_INIT_CHK_RET ();
+ return nsfw_nshmem_release (pname);
+}
+
+/*****************************************************************************
+* Prototype : nsfw_nshmem_sppool_statics
+* Description  : report the memory size of an sppool
+* Input : mring_handle sppool
+* Output : None
+* Return Value : ssize_t
+* Calls :
+* Called By :
+*
+*****************************************************************************/
+ssize_t
+nsfw_nshmem_sppool_statics (mring_handle sppool)
+{
+ struct nsfw_mem_ring *phead = (struct nsfw_mem_ring *) sppool;
+
+ return sizeof (struct nsfw_mem_ring) +
+ (ssize_t) phead->size * sizeof (union RingData_U) +
+ (ssize_t) phead->size * phead->eltsize;
+}
+
+/*****************************************************************************
+* Prototype : nsfw_nshmem_ring_statics
+* Description  : report the memory size of a ring
+* Input : mring_handle handle
+* Output : None
+* Return Value : ssize_t
+* Calls :
+* Called By :
+*
+*****************************************************************************/
+ssize_t
+nsfw_nshmem_ring_statics (mring_handle handle)
+{
+ struct nsfw_mem_ring *ring = (struct nsfw_mem_ring *) handle;
+ return ring->size * sizeof (union RingData_U) +
+ sizeof (struct nsfw_mem_ring);
+}
+
+/*****************************************************************************
+* Prototype : nsfw_nshmem_stactic
+* Description  : report the memory size according to the memory structure type
+* Input : void* handle
+* nsfw_mem_struct_type type
+* Output : None
+* Return Value : ssize_t
+* Calls :
+* Called By :
+*
+*****************************************************************************/
+ssize_t
+nsfw_nshmem_stactic (void *handle, nsfw_mem_struct_type type)
+{
+ switch (type)
+ {
+ case NSFW_MEM_MBUF:
+ return -1;
+ case NSFW_MEM_SPOOL:
+ return nsfw_nshmem_sppool_statics (handle);
+ case NSFW_MEM_RING:
+ return nsfw_nshmem_ring_statics (handle);
+ default:
+ break;
+ }
+ return -1;
+}
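
Taken together, nsfw_nshmem_mng.c implements a malloc-backed zone registry: init allocates the bookkeeping, create reserves a named block, lookup finds it again by name, and release frees it and clears its slot. The sketch below shows that round trip; it is a minimal example assuming only the field names visible above (stname.aname, lenth, aname) and STRCPY_S/MEMSET_S/NSFW_MEM_OK from the included headers, with any other fields of nsfw_mem_para or nsfw_mem_zone left to the caller.

    /* Sketch under the stated assumptions; error handling is abbreviated. */
    #include "nstack_securec.h"
    #include "nsfw_mem_desc.h"
    #include "nsfw_nshmem_mng.h"

    static i32
    sketch_zone_roundtrip (nsfw_mem_para * para)
    {
      nsfw_mem_zone zone_info;
      nsfw_mem_name zone_name;
      mzone_handle addr = NULL;

      if (NSFW_MEM_OK != nsfw_nshmem_init (para))
        {
          return NSFW_MEM_ERR;
        }

      /* reserve a 4 KB non-shared block named "demo_zone" */
      (void) MEMSET_S (&zone_info, sizeof (zone_info), 0, sizeof (zone_info));
      (void) STRCPY_S (zone_info.stname.aname,
                       sizeof (zone_info.stname.aname), "demo_zone");
      zone_info.lenth = 4096;
      addr = nsfw_nshmem_create (&zone_info);

      /* the same block can be found again by name ... */
      (void) MEMSET_S (&zone_name, sizeof (zone_name), 0, sizeof (zone_name));
      (void) STRCPY_S (zone_name.aname, sizeof (zone_name.aname), "demo_zone");
      if (NULL == addr || addr != nsfw_nshmem_lookup (&zone_name))
        {
          return NSFW_MEM_ERR;
        }

      /* ... and released, which frees the block and clears its slot */
      return nsfw_nshmem_release (&zone_name);
    }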
diff --git a/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mng.h b/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mng.h
new file mode 100644
index 0000000..3f5b1b9
--- /dev/null
+++ b/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_mng.h
@@ -0,0 +1,70 @@
+/*
+*
+* Copyright (c) 2018 Huawei Technologies Co.,Ltd.
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at:
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef _NSFW_NSHMEM_MNG_H_
+#define _NSFW_NSHMEM_MNG_H_
+
+#include "generic/common_mem_rwlock.h"
+
+#include "common_func.h"
+
+#define NSFW_NSHMEM_ZONE_MAX 2560
+
+typedef struct
+{
+ i8 aname[NSFW_MEM_NAME_LENTH];
+ void *addr;
+ int lenth;
+} nsfw_nshmem_mzone;
+
+typedef struct
+{
+ nsfw_nshmem_mzone amemzone[NSFW_NSHMEM_ZONE_MAX];
+ common_mem_rwlock_t mlock;
+} nsfw_nshmem_cfg;
+
+/*
+ * non-shared memory module init
+ */
+i32 nsfw_nshmem_init (nsfw_mem_para * para);
+
+/*
+ * non-shared memory module destroy
+ */
+void nsfw_nshmem_destory (void);
+
+/*
+ * create a non-shared memory zone
+ */
+mzone_handle nsfw_nshmem_create (nsfw_mem_zone * pinfo);
+
+mzone_handle nsfw_nshmem_lookup (nsfw_mem_name * pname);
+
+i32 nsfw_nshmem_release (nsfw_mem_name * pname);
+
+mring_handle nsfw_nshmem_spcreate (nsfw_mem_sppool * pmpinfo);
+
+i32 nsfw_nshmem_sprelease (nsfw_mem_name * pname);
+
+mring_handle nsfw_nshmem_sp_lookup (nsfw_mem_name * pname);
+
+mring_handle nsfw_nshmem_ringcreate (nsfw_mem_mring * pringinfo);
+
+i32 nsfw_nshmem_ringrelease (nsfw_mem_name * pname);
+
+ssize_t nsfw_nshmem_stactic (void *handle, nsfw_mem_struct_type type);
+
+#endif
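
This header also exposes the sp-pool API implemented earlier: nsfw_nshmem_spcreate lays out one struct nsfw_mem_ring header, then usnum RingData_U slots, then usnum elements of useltsize bytes (with usnum rounded up to a power of two), and pre-fills the ring with pointers to those elements; nsfw_nshmem_stactic with NSFW_MEM_SPOOL reports that same total. The sketch below is one way to drive it; the field names come from the code above, while the enmptype enumerators are not shown in this diff, so the value is supplied by the caller.

    /* Sketch under the stated assumptions: enmptype is caller-supplied. */
    #include "nstack_securec.h"
    #include "nsfw_mem_desc.h"
    #include "nsfw_nshmem_mng.h"

    static mring_handle
    sketch_sppool_create (i32 mptype)
    {
      nsfw_mem_sppool pool_info;
      mring_handle pool = NULL;

      (void) MEMSET_S (&pool_info, sizeof (pool_info), 0, sizeof (pool_info));
      (void) STRCPY_S (pool_info.stname.aname,
                       sizeof (pool_info.stname.aname), "demo_pool");
      pool_info.usnum = 1023;     /* rounded up to 1024 slots internally */
      pool_info.useltsize = 128;  /* fixed element size in bytes */
      pool_info.enmptype = mptype;

      pool = nsfw_nshmem_spcreate (&pool_info);
      if (NULL != pool)
        {
          /* footprint = ring header + 1024 slots + 1024 * 128 bytes */
          (void) nsfw_nshmem_stactic (pool, NSFW_MEM_SPOOL);
        }
      return pool;
    }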
diff --git a/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_ring.c b/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_ring.c
new file mode 100644
index 0000000..64e7d57
--- /dev/null
+++ b/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_ring.c
@@ -0,0 +1,436 @@
+/*
+*
+* Copyright (c) 2018 Huawei Technologies Co.,Ltd.
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at:
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <string.h>
+#include <sched.h>
+#include "nstack_securec.h"
+
+#include "nsfw_mem_desc.h"
+#include "nsfw_nshmem_ring.h"
+#include "nsfw_ring_fun.h"
+#include "common_func.h"
+
+/* copy ring entry data into the caller's object table */
+NSTACK_STATIC inline void
+nsfw_nshmem_ring_obj_copy (struct nsfw_mem_ring *r, uint32_t cons_head,
+ void **obj_table, unsigned n)
+{
+ uint32_t idx = cons_head & r->mask;
+ unsigned i = 0;
+ const uint32_t size = r->size;
+
+ if (likely (idx + n < size))
+ {
+ for (i = 0; i < (n & (~(unsigned) 0x3)); i += 4, idx += 4)
+ {
+ obj_table[i] = (void *) r->ring[idx].data_l;
+ obj_table[i + 1] = (void *) r->ring[idx + 1].data_l;
+ obj_table[i + 2] = (void *) r->ring[idx + 2].data_l;
+ obj_table[i + 3] = (void *) r->ring[idx + 3].data_l;
+ }
+      switch (n & 0x3)
+        {
+        case 3:
+          obj_table[i++] = (void *) r->ring[idx++].data_l;
+          /* fall through */
+        case 2:
+          obj_table[i++] = (void *) r->ring[idx++].data_l;
+          /* fall through */
+        case 1:
+          obj_table[i++] = (void *) r->ring[idx++].data_l;
+        }
+ }
+ else
+ {
+ for (i = 0; idx < size; i++, idx++)
+ {
+ obj_table[i] = (void *) r->ring[idx].data_l;
+ }
+
+ for (idx = 0; i < n; i++, idx++)
+ {
+ obj_table[i] = (void *) r->ring[idx].data_l;
+ }
+ }
+}
+
+/* recover producer indexes if a fork interrupted an enqueue */
+NSTACK_STATIC inline void
+nsfw_nshmem_enqueue_fork_recov (struct nsfw_mem_ring *r)
+{
+ u32_t pidflag = 0;
+ u32_t curpid = get_sys_pid ();
+ int success = 0;
+  /* if the pid differs, a multi-threaded fork may have happened */
+ pidflag = r->prodhflag;
+
+ if (unlikely (pidflag != curpid))
+ {
+ success = common_mem_atomic32_cmpset (&r->prodhflag, pidflag, curpid);
+
+ if (unlikely (success != 0))
+ {
+ /*recover it */
+ if (r->prod.tail != r->prod.head)
+ {
+ r->prod.head = r->prod.tail;
+ }
+
+ r->prodtflag = curpid;
+ }
+ }
+
+ return;
+}
+
+NSTACK_STATIC inline void
+nsfw_nshmem_dequeue_fork_recov (struct nsfw_mem_ring *r)
+{
+ u32_t pidflag = 0;
+ u32_t curpid = get_sys_pid ();
+ int success = 0;
+  /* if the pid differs, a multi-threaded fork may have happened */
+ pidflag = r->conshflag;
+
+ if (unlikely (pidflag != curpid))
+ {
+ success = common_mem_atomic32_cmpset (&r->conshflag, pidflag, curpid);
+
+ if (unlikely (success != 0))
+ {
+ /*recover it */
+ if (r->cons.tail != r->cons.head)
+ {
+ r->cons.head = r->cons.tail;
+ }
+
+ r->constflag = curpid;
+ }
+ }
+
+ return;
+}
+
+/*
+this is a multi-thread/process enqueue function; note the following point:
+1. if an enqueue is interrupted part-way, one element may be lost because no one advances the head
+*/
+int
+nsfw_nshmem_ring_mp_enqueue (struct nsfw_mem_ring *mem_ring, void *obj_table)
+{
+ uint32_t prod_head, prod_next;
+ uint32_t cons_tail, free_entries;
+ int success;
+ unsigned rep = 0;
+ uint32_t mask = mem_ring->mask;
+ uint32_t size = mem_ring->size;
+ uint32_t n = 1;
+
+ /* move prod.head atomically */
+ do
+ {
+
+ prod_head = mem_ring->prod.head;
+ cons_tail = mem_ring->cons.tail;
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * prod_head > cons_tail). So 'free_entries' is always between 0
+ * and size(ring)-1. */
+ free_entries = (size + cons_tail - prod_head);
+
+ /* check that we have enough room in ring */
+ if (unlikely (n > free_entries))
+ {
+ return 0;
+          /* The code that used to follow here was removed as dead code. */
+ }
+
+      /* if the pid differs, a multi-threaded fork may have happened */
+ nsfw_nshmem_enqueue_fork_recov (mem_ring);
+
+ while (unlikely
+ ((mem_ring->prod.tail != mem_ring->prod.head)
+ || (mem_ring->prodtflag != mem_ring->prodhflag)))
+ {
+ common_mem_pause ();
+ }
+
+ prod_next = prod_head + n;
+ success =
+ common_mem_atomic32_cmpset (&mem_ring->prod.head, prod_head,
+ prod_next);
+ }
+ while (unlikely (success == 0));
+
+ mem_ring->ring[prod_head & mask].data_l = (u64) obj_table;
+
+ /*
+ * If there are other enqueues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely (mem_ring->prod.tail != prod_head))
+ {
+ common_mem_pause ();
+
+      /* Set COMMON_RING_PAUSE_REP_COUNT to avoid spinning too long while
+       * waiting for the other thread to finish. It gives a pre-empted
+       * thread a chance to proceed and finish its ring enqueue. */
+      /* check whether the queue can be operated on yet */
+ if (++rep == 5)
+ {
+ rep = 0;
+ (void) sched_yield ();
+ }
+ }
+
+ mem_ring->prod.tail = prod_next;
+ return (int) n;
+}
+
+/*
+ this is a single thread/process enqueue function
+ */
+int
+nsfw_nshmem_ring_sp_enqueue (struct nsfw_mem_ring *r, void *obj_table)
+{
+ uint32_t prod_head, cons_tail;
+ uint32_t prod_next, free_entries;
+ uint32_t mask = r->mask;
+ uint32_t n = 1;
+ uint32_t size = r->size;
+
+ prod_head = r->prod.head;
+ cons_tail = r->cons.tail;
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * prod_head > cons_tail). So 'free_entries' is always between 0
+ * and size(ring)-1. */
+ free_entries = size + cons_tail - prod_head;
+
+ /* check that we have enough room in ring */
+ if (unlikely (n > free_entries))
+ {
+ return 0;
+ }
+
+ nsfw_nshmem_enqueue_fork_recov (r);
+
+ prod_next = prod_head + n;
+ r->prod.head = prod_next;
+
+ r->ring[prod_head & mask].data_l = (u64) obj_table;
+
+ r->prod.tail = prod_next;
+ return (int) n;
+}
+
+/*
+ this is an enhanced mc_ring_dequeue that supports dequeuing multiple elements at a time.
+*/
+int
+nsfw_nshmem_ring_mc_dequeuev (struct nsfw_mem_ring *r, void **obj_table,
+ unsigned int n)
+{
+ uint32_t cons_head, prod_tail;
+ uint32_t cons_next, entries;
+ int success;
+ unsigned rep = 0;
+ uint32_t num = n;
+
+ /* Avoid the unnecessary cmpset operation below, which is also
+ * potentially harmful when n equals 0. */
+ if (unlikely (num == 0))
+ {
+ return 0;
+ }
+
+ nsfw_nshmem_dequeue_fork_recov (r);
+
+ /* move cons.head atomically */
+ do
+ {
+ num = n;
+ cons_head = r->cons.head;
+ prod_tail = r->prod.tail;
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * cons_head > prod_tail). So 'entries' is always between 0
+ * and size(ring)-1. */
+ entries = (prod_tail - cons_head);
+
+ /* Set the actual entries for dequeue */
+ if (unlikely (num > entries))
+ {
+ if (likely (entries > 0))
+ {
+ num = entries;
+ }
+ else
+ {
+ return 0;
+ }
+ }
+
+      /* check that the queue can be operated on */
+ while (unlikely
+ ((r->cons.tail != r->cons.head)
+ || (r->conshflag != r->constflag)))
+ {
+ common_mem_pause ();
+ }
+
+ cons_next = cons_head + num;
+
+ success =
+ common_mem_atomic32_cmpset (&r->cons.head, cons_head, cons_next);
+ }
+ while (unlikely (success == 0));
+
+ nsfw_nshmem_ring_obj_copy (r, cons_head, obj_table, num);
+
+ /*
+ * If there are other dequeues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely (r->cons.tail != cons_head))
+ {
+ common_mem_pause ();
+
+      /* Set COMMON_RING_PAUSE_REP_COUNT to avoid spinning too long while
+       * waiting for the other thread to finish. It gives a pre-empted
+       * thread a chance to proceed and finish its ring dequeue. */
+      /* check whether the queue can be operated on yet */
+ if (++rep == 5)
+ {
+ rep = 0;
+ (void) sched_yield ();
+ }
+ }
+
+ r->cons.tail = cons_next;
+
+ return (int) num;
+}
+
+/* this is a multi-thread/process dequeue function; note the following point:
+1. if a dequeue is interrupted part-way, no one advances the tail, so retries may multiply.
+*/
+int
+nsfw_nshmem_ring_mc_dequeue (struct nsfw_mem_ring *ring, void **box)
+{
+ return nsfw_nshmem_ring_mc_dequeuev (ring, box, 1);
+}
+
+/*
+ this is a single thread/process dequeue function
+*/
+int
+nsfw_nshmem_ring_sc_dequeuev (struct nsfw_mem_ring *r, void **obj_table,
+ unsigned int n)
+{
+ uint32_t cons_head, prod_tail;
+ uint32_t cons_next, entries;
+ uint32_t inum = n;
+ cons_head = r->cons.head;
+ prod_tail = r->prod.tail;
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * cons_head > prod_tail). So 'entries' is always between 0
+ * and size(ring)-1. */
+ entries = prod_tail - cons_head;
+
+ if (unlikely (inum > entries))
+ {
+ if (likely (entries > 0))
+ {
+ inum = entries;
+ }
+ else
+ {
+ return 0;
+ }
+ }
+
+ nsfw_nshmem_dequeue_fork_recov (r);
+
+ cons_next = cons_head + inum;
+ r->cons.head = cons_next;
+
+ nsfw_nshmem_ring_obj_copy (r, cons_head, obj_table, inum);
+
+ r->cons.tail = cons_next;
+ return (int) inum;
+}
+
+/*
+ this is a single-consumer dequeue of one element, built on sc_dequeuev.
+*/
+int
+nsfw_nshmem_ring_sc_dequeue (struct nsfw_mem_ring *ring, void **box)
+{
+ return nsfw_nshmem_ring_sc_dequeuev (ring, box, 1);
+}
+
+/* the stack uses a single thread; for performance this queue does not support multiple threads */
+int
+nsfw_nshmem_ring_singlethread_enqueue (struct nsfw_mem_ring *ring, void *box)
+{
+ u32 head = 0;
+
+ /*if queue is full, just return 0 */
+ if (unlikely (ring->prod.head >= (ring->size + ring->cons.tail)))
+ {
+ return 0;
+ }
+
+ head = ring->prod.head;
+ ring->ring[head & ring->mask].data_l = (u64) box;
+ ring->prod.head++;
+ return 1;
+}
+
+/* the stack uses a single thread; for performance this queue does not support multiple threads */
+int
+nsfw_nshmem_ring_singlethread_dequeue (struct nsfw_mem_ring *ring, void **box)
+{
+ return nsfw_nshmem_ring_singlethread_dequeuev (ring, box, 1);
+}
+
+/* the stack uses a single thread; for performance this queue does not support multiple threads */
+int
+nsfw_nshmem_ring_singlethread_dequeuev (struct nsfw_mem_ring *ring,
+ void **box, unsigned int n)
+{
+ u32 tail = 0;
+ u32 num = 0;
+
+ while (num < n)
+ {
+ tail = ring->cons.tail;
+
+      /* if all entries have been dequeued, return the count so far */
+ if (unlikely (ring->prod.head == ring->cons.tail))
+ {
+ return num;
+ }
+
+ box[num] = (void *) ring->ring[tail & ring->mask].data_l;
+ ring->cons.tail++;
+ num++;
+ }
+
+ return num;
+}
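
For the case the last three functions target, a single stack thread owning a ring end to end, the flow is simply create, enqueue, dequeue, with no atomics involved. Below is a small sketch of that path; the nsfw_mem_mring field names follow the code above, and the enmptype value is again caller-supplied because its enumerators are outside this diff.

    /* Sketch under the stated assumptions; the ring stores raw pointers. */
    #include "nstack_securec.h"
    #include "nsfw_mem_desc.h"
    #include "nsfw_nshmem_mng.h"
    #include "nsfw_nshmem_ring.h"

    static int
    sketch_ring_roundtrip (i32 mptype)
    {
      nsfw_mem_mring ring_info;
      struct nsfw_mem_ring *ring = NULL;
      void *box = NULL;
      int value = 42;

      (void) MEMSET_S (&ring_info, sizeof (ring_info), 0, sizeof (ring_info));
      (void) STRCPY_S (ring_info.stname.aname,
                       sizeof (ring_info.stname.aname), "demo_ring");
      ring_info.usnum = 255;      /* rounded up to 256 slots internally */
      ring_info.enmptype = mptype;

      ring = (struct nsfw_mem_ring *) nsfw_nshmem_ringcreate (&ring_info);
      if (NULL == ring)
        {
          return -1;
        }

      /* single-threaded fast path: no cmpset, no pause loops */
      if (1 != nsfw_nshmem_ring_singlethread_enqueue (ring, &value))
        {
          return -1;
        }
      if (1 != nsfw_nshmem_ring_singlethread_dequeue (ring, &box))
        {
          return -1;
        }
      return (box == (void *) &value) ? 0 : -1;
    }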
diff --git a/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_ring.h b/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_ring.h
new file mode 100644
index 0000000..93a4d4a
--- /dev/null
+++ b/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_ring.h
@@ -0,0 +1,37 @@
+/*
+*
+* Copyright (c) 2018 Huawei Technologies Co.,Ltd.
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at:
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef _NSFW_NSHMEM_RING_H_
+#define _NSFW_NSHMEM_RING_H_
+
+#include <stdint.h>
+
+int nsfw_nshmem_ring_mp_enqueue (struct nsfw_mem_ring *ring, void *box);
+int nsfw_nshmem_ring_sp_enqueue (struct nsfw_mem_ring *ring, void *box);
+int nsfw_nshmem_ring_mc_dequeue (struct nsfw_mem_ring *ring, void **box);
+int nsfw_nshmem_ring_mc_dequeuev (struct nsfw_mem_ring *ring, void **box,
+ unsigned int n);
+int nsfw_nshmem_ring_sc_dequeue (struct nsfw_mem_ring *ring, void **box);
+int nsfw_nshmem_ring_sc_dequeuev (struct nsfw_mem_ring *ring, void **box,
+ unsigned int n);
+int nsfw_nshmem_ring_singlethread_enqueue (struct nsfw_mem_ring *ring,
+ void *box);
+int nsfw_nshmem_ring_singlethread_dequeue (struct nsfw_mem_ring *ring,
+ void **box);
+int nsfw_nshmem_ring_singlethread_dequeuev (struct nsfw_mem_ring *ring,
+ void **box, unsigned int n);
+
+#endif /*_NSFW_NSHMEM_RING_H_*/