summaryrefslogtreecommitdiffstats
path: root/src/pal/linux
diff options
context:
space:
mode:
Diffstat (limited to 'src/pal/linux')
-rwxr-xr-xsrc/pal/linux/CRing.h98
-rwxr-xr-xsrc/pal/linux/mbuf.cpp425
-rwxr-xr-xsrc/pal/linux/mbuf.h192
-rwxr-xr-xsrc/pal/linux/pal_utl.cpp29
-rwxr-xr-xsrc/pal/linux/pal_utl.h70
-rwxr-xr-xsrc/pal/linux/sanb_atomic.h175
6 files changed, 989 insertions, 0 deletions
diff --git a/src/pal/linux/CRing.h b/src/pal/linux/CRing.h
new file mode 100755
index 00000000..cf69422e
--- /dev/null
+++ b/src/pal/linux/CRing.h
@@ -0,0 +1,98 @@
+#ifndef C_RING_H
+#define C_RING_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include <assert.h>
+#include <stdint.h>
+#include <string>
+#include <queue>
+
+
+
typedef std::queue<void *> my_stl_queue_t;

/* Single-producer ring simulation backed by an unbounded STL queue.
 * Mirrors the DPDK ring wrapper API: Create/Delete, Enqueue/Dequeue
 * returning 0 on success and non-zero on failure. */
class CRingSp {
public:
    CRingSp(){
        m_queue = 0;
    }

    /* Allocate the backing queue.  name/cnt/socket_id are accepted for
     * API compatibility with the DPDK build but unused here.
     * Fix: calling Create() twice no longer leaks the first queue. */
    bool Create(std::string name,
                uint16_t cnt,
                int socket_id){
        if (!m_queue) {
            m_queue = new my_stl_queue_t();
        }
        assert(m_queue);
        return (true);
    }

    void Delete(void){
        if (m_queue) {
            delete m_queue;
            m_queue = 0;
        }
    }

    /* Always succeeds: the simulated ring is unbounded. */
    int Enqueue(void *obj){
        m_queue->push(obj);
        return (0);
    }

    /* Returns 0 and sets obj on success; returns 1 and leaves obj
     * untouched when the ring is empty. */
    int Dequeue(void * & obj){
        if (m_queue->empty()) {
            return (1);
        }
        obj = m_queue->front();
        m_queue->pop();
        return (0);
    }

    /* An unbounded queue can never be full. */
    bool isFull(void){
        return (false);
    }

    bool isEmpty(void){
        return (m_queue->empty());
    }

private:
    my_stl_queue_t * m_queue;
};
+
+template <class T>
+class CTRingSp : public CRingSp {
+public:
+ int Enqueue(T *obj){
+ return ( CRingSp::Enqueue((void*)obj) );
+ }
+
+ int Dequeue(T * & obj){
+ return (CRingSp::Dequeue(*((void **)&obj)));
+ }
+};
+
+
+
+
+#endif
+
diff --git a/src/pal/linux/mbuf.cpp b/src/pal/linux/mbuf.cpp
new file mode 100755
index 00000000..7eca8fd5
--- /dev/null
+++ b/src/pal/linux/mbuf.cpp
@@ -0,0 +1,425 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
#include "mbuf.h"
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <stdarg.h>
#include <ctype.h>
#include "sanb_atomic.h"
+
+
+#define RTE_MBUF_TO_BADDR(mb) (((struct rte_mbuf *)(mb)) + 1)
+#define RTE_MBUF_FROM_BADDR(ba) (((struct rte_mbuf *)(ba)) - 1)
+
+
+void rte_pktmbuf_detach(struct rte_mbuf *m);
+
+
+
+void utl_rte_check(rte_mempool_t * mp){
+ assert(mp->magic == MAGIC0);
+ assert(mp->magic2 == MAGIC2);
+}
+
+void utl_rte_pktmbuf_check(struct rte_mbuf *m){
+ utl_rte_check(m->pool);
+ assert(m->magic == MAGIC0);
+ assert(m->magic2== MAGIC2);
+}
+
+rte_mempool_t * utl_rte_mempool_create_non_pkt(const char *name,
+ unsigned n,
+ unsigned elt_size,
+ unsigned cache_size,
+ uint32_t _id ,
+ int socket_id){
+ rte_mempool_t * p=new rte_mempool_t();
+ assert(p);
+ p->elt_size =elt_size;
+ p->size=n;
+ p->magic=MAGIC0;
+ p->magic2=MAGIC2;
+ return p;
+}
+
+rte_mempool_t * utl_rte_mempool_create(const char *name,
+ unsigned n,
+ unsigned elt_size,
+ unsigned cache_size,
+ uint32_t _id,
+ int socket_id
+ ){
+
+ rte_mempool_t * p=new rte_mempool_t();
+ assert(p);
+ p->size=n;
+ p->elt_size =elt_size;
+ p->magic=MAGIC0;
+ p->magic2=MAGIC2;
+ return p;
+}
+
+
+uint16_t rte_mbuf_refcnt_update(rte_mbuf_t *m, int16_t value)
+{
+ utl_rte_pktmbuf_check(m);
+ uint32_t a=sanb_atomic_add_return_32_old(&m->refcnt_reserved,1);
+ return (a);
+}
+
+
+
+
+void rte_pktmbuf_reset(struct rte_mbuf *m)
+{
+ utl_rte_pktmbuf_check(m);
+ m->next = NULL;
+ m->pkt_len = 0;
+ m->nb_segs = 1;
+ m->in_port = 0xff;
+ m->refcnt_reserved=1;
+
+ #if RTE_PKTMBUF_HEADROOM > 0
+ m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
+ RTE_PKTMBUF_HEADROOM : m->buf_len;
+ #else
+ m->data_off = RTE_PKTMBUF_HEADROOM ;
+ #endif
+
+ m->data_len = 0;
+}
+
+
+rte_mbuf_t *rte_pktmbuf_alloc(rte_mempool_t *mp){
+
+ uint16_t buf_len;
+
+ utl_rte_check(mp);
+
+ buf_len = mp->elt_size ;
+
+ rte_mbuf_t *m =(rte_mbuf_t *)malloc(buf_len );
+ assert(m);
+
+ m->magic = MAGIC0;
+ m->magic2 = MAGIC2;
+ m->pool = mp;
+ m->refcnt_reserved =0;
+
+ m->buf_len = buf_len;
+ m->buf_addr =(char *)((char *)m+sizeof(rte_mbuf_t)+RTE_PKTMBUF_HEADROOM) ;
+
+ rte_pktmbuf_reset(m);
+ return (m);
+}
+
+
+
/* Release one segment: atomically drop its refcount (saturating at
 * zero) and free it only when this caller moved the count 1 -> 0.
 * For an indirect mbuf (buf_addr not pointing just past its own
 * header) the donor mbuf 'md' is detached first and freed once its
 * own refcount reaches zero.
 * NOTE(review): assumes rte_mbuf_refcnt_update(md, -1) returns the
 * updated count -- verify, since the '== 0' test below frees md. */
void rte_pktmbuf_free_seg(rte_mbuf_t *m){

    utl_rte_pktmbuf_check(m);
    uint32_t old=sanb_atomic_dec2zero32(&m->refcnt_reserved);
    if (old == 1) {
        /* md == m for a direct mbuf: RTE_MBUF_FROM_BADDR inverts
         * the RTE_MBUF_TO_BADDR layout used at allocation. */
        struct rte_mbuf *md = RTE_MBUF_FROM_BADDR(m->buf_addr);

        if ( md != m ) {
            /* indirect: restore m's own buffer, then drop md's ref */
            rte_pktmbuf_detach(m);
            if (rte_mbuf_refcnt_update(md, -1) == 0)
                free(md);
        }

        free(m);
    }
}
+
+
+
+void rte_pktmbuf_free(rte_mbuf_t *m){
+
+ rte_mbuf_t *m_next;
+
+ utl_rte_pktmbuf_check(m);
+
+ while (m != NULL) {
+ m_next = m->next;
+ rte_pktmbuf_free_seg(m);
+ m = m_next;
+ }
+}
+
+static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
+{
+ struct rte_mbuf *m2 = (struct rte_mbuf *)m;
+ utl_rte_pktmbuf_check(m);
+
+
+ while (m2->next != NULL)
+ m2 = m2->next;
+ return m2;
+}
+
+static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
+{
+ return m->data_off;
+}
+
+static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
+{
+ return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
+ m->data_len);
+}
+
+
+char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
+{
+ void *tail;
+ struct rte_mbuf *m_last;
+ utl_rte_pktmbuf_check(m);
+
+
+ m_last = rte_pktmbuf_lastseg(m);
+ if (len > rte_pktmbuf_tailroom(m_last))
+ return NULL;
+
+ tail = (char*) m_last->buf_addr + m_last->data_len;
+ m_last->data_len = (uint16_t)(m_last->data_len + len);
+ m->pkt_len = (m->pkt_len + len);
+ return (char*) tail;
+}
+
+
+char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
+{
+ utl_rte_pktmbuf_check(m);
+
+ if (len > m->data_len)
+ return NULL;
+
+ m->data_len = (uint16_t)(m->data_len - len);
+ m->data_off += len;
+ m->pkt_len = (m->pkt_len - len);
+ return (char *)m->buf_addr + m->data_off;
+}
+
+
+int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
+{
+ struct rte_mbuf *m_last;
+ utl_rte_pktmbuf_check(m);
+
+ m_last = rte_pktmbuf_lastseg(m);
+ if (len > m_last->data_len)
+ return -1;
+
+ m_last->data_len = (uint16_t)(m_last->data_len - len);
+ m->pkt_len = (m->pkt_len - len);
+ return 0;
+}
+
+
/* Hex + ASCII dump of 'len' bytes at 'buf', 16 bytes per line; each
 * line is composed in a stack buffer and printed whole. */
static void
rte_pktmbuf_hexdump(const void *buf, unsigned int len)
{
    unsigned int i, out, ofs;
    const unsigned char *data = (unsigned char *)buf;
#define LINE_LEN 80
    char line[LINE_LEN];

    printf(" dump data at 0x%p, len=%u\n", data, len);
    ofs = 0;
    while (ofs < len) {
        /* offset column */
        out = snprintf(line, LINE_LEN, " %08X", ofs);
        /* up to 16 hex bytes */
        for (i = 0; ofs+i < len && i < 16; i++)
            out += snprintf(line+out, LINE_LEN - out, " %02X",
                            data[ofs+i]&0xff);
        /* pad a short final line so the ASCII column stays aligned */
        for (; i <= 16; i++)
            out += snprintf(line+out, LINE_LEN - out, " ");
        /* printable-ASCII rendering; '.' for everything else */
        for (i = 0; ofs < len && i < 16; i++, ofs++) {
            unsigned char c = data[ofs];
            if (!isascii(c) || !isprint(c))
                c = '.';
            out += snprintf(line+out, LINE_LEN - out, "%c", c);
        }
        printf("%s\n", line);
    }
}
+
+
+int rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p){
+ utl_rte_check(mp);
+ uint16_t buf_len;
+ buf_len = mp->elt_size ;
+ *obj_p=(void *)::malloc(buf_len);
+ return (0);
+}
+
/* Return an element to the (simulated) pool -- objects are plain heap
 * blocks, so this is just free(); the pool pointer is unused. */
void rte_mempool_sp_put(struct rte_mempool *mp, void *obj){
    (void)mp;
    free(obj);
}
+
+
/* Terminate the process with 'exit_code'.
 * Fix: the printf-style message was silently discarded; it is now
 * written to stderr before exiting so fatal errors are visible. */
void rte_exit(int exit_code, const char *format, ...){
    if (format != NULL) {
        va_list ap;
        va_start(ap, format);
        vfprintf(stderr, format, ap);
        va_end(ap);
    }
    exit(exit_code);
}
+
+
+
+void
+rte_pktmbuf_dump(const struct rte_mbuf *m, unsigned dump_len)
+{
+ unsigned int len;
+ unsigned nb_segs;
+
+
+ printf("dump mbuf at 0x%p, phys=0x%p, buf_len=%u\n",
+ m, m->buf_addr, (unsigned)m->buf_len);
+ printf(" pkt_len=%u, nb_segs=%u, "
+ "in_port=%u\n", m->pkt_len,
+ (unsigned)m->nb_segs, (unsigned)m->in_port);
+ nb_segs = m->nb_segs;
+
+ while (m && nb_segs != 0) {
+
+ printf(" segment at 0x%p, data=0x%p, data_len=%u\n",
+ m, m->buf_addr, (unsigned)m->data_len);
+ len = dump_len;
+ if (len > m->data_len)
+ len = m->data_len;
+ if (len != 0)
+ rte_pktmbuf_hexdump(m->buf_addr, len);
+ dump_len -= len;
+ m = m->next;
+ nb_segs --;
+ }
+}
+
+
+rte_mbuf_t * utl_rte_pktmbuf_add_after2(rte_mbuf_t *m1,rte_mbuf_t *m2){
+ utl_rte_pktmbuf_check(m1);
+ utl_rte_pktmbuf_check(m2);
+
+ m1->next=m2;
+ m1->pkt_len += m2->data_len;
+ m1->nb_segs = m2->nb_segs + 1;
+ return (m1);
+}
+
+
+
/* Make 'mi' an indirect mbuf sharing 'md's data buffer: takes a
 * reference on md, then copies the buffer geometry into mi and resets
 * mi to a single-segment packet.
 * NOTE(review): data_len is NOT copied from md here, and pkt_len is
 * set from mi's current data_len -- the caller appears responsible
 * for setting data_len; confirm against call sites. */
void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)
{

    rte_mbuf_refcnt_update(md, 1);
    mi->buf_addr = md->buf_addr;
    mi->buf_len = md->buf_len;
    mi->data_off = md->data_off;

    mi->next = NULL;
    mi->pkt_len = mi->data_len;
    mi->nb_segs = 1;
}
+
+void rte_pktmbuf_detach(struct rte_mbuf *m)
+{
+ const struct rte_mempool *mp = m->pool;
+ void *buf = RTE_MBUF_TO_BADDR(m);
+ uint32_t buf_len = mp->elt_size - sizeof(*m);
+
+ m->buf_addr = buf;
+ m->buf_len = (uint16_t)buf_len;
+
+ #if RTE_PKTMBUF_HEADROOM > 0
+ m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
+ RTE_PKTMBUF_HEADROOM : m->buf_len;
+ #else
+ m->data_off = RTE_PKTMBUF_HEADROOM ;
+ #endif
+
+
+ m->data_len = 0;
+}
+
+
+
+
+
+rte_mbuf_t * utl_rte_pktmbuf_add_after(rte_mbuf_t *m1,rte_mbuf_t *m2){
+
+ utl_rte_pktmbuf_check(m1);
+ utl_rte_pktmbuf_check(m2);
+
+ rte_mbuf_refcnt_update(m2,1);
+ m1->next=m2;
+ m1->pkt_len += m2->data_len;
+ m1->nb_segs = m2->nb_segs + 1;
+ return (m1);
+}
+
+
/* Return a pseudo-random 64-bit value.
 * Fix: a single rand() call yields at most 31 bits (RAND_MAX is
 * commonly 0x7fffffff, and only guaranteed >= 32767), so the upper
 * half of the result was always zero.  Gather entropy from several
 * calls, 15 guaranteed bits at a time. */
uint64_t rte_rand(void){
    uint64_t r = 0;
    int i;
    for (i = 0; i < 5; i++) {
        r = (r << 15) ^ (uint64_t)rand();
    }
    return (r);
}
+
+
+
+
+
#ifdef ONLY_A_TEST


#define CONST_NB_MBUF 2048
#define CONST_MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

/* Smoke test: allocate two mbufs, fill each with 10 bytes, dump, free. */
void test_pkt_mbuf(){

    /* Fix: utl_rte_mempool_create() takes six parameters; the original
     * four-argument call did not compile when ONLY_A_TEST was defined. */
    rte_mempool_t * mp1=utl_rte_mempool_create("big-const",
                                               CONST_NB_MBUF,
                                               CONST_MBUF_SIZE,
                                               32,
                                               0,
                                               SOCKET_ID_ANY);
    rte_mbuf_t * m1 = rte_pktmbuf_alloc(mp1);
    rte_mbuf_t * m2 = rte_pktmbuf_alloc(mp1);

    char *p=rte_pktmbuf_append(m1, 10);
    int i;
    assert(p);   /* fix: append result was used unchecked */

    for (i=0; i<10;i++) {
        p[i]=i;
    }

    p=rte_pktmbuf_append(m2, 10);
    assert(p);

    for (i=0; i<10;i++) {
        p[i]=0x55+i;
    }

    rte_pktmbuf_dump(m1, m1->pkt_len);
    rte_pktmbuf_dump(m2, m1->pkt_len);

    rte_pktmbuf_free(m1);
    rte_pktmbuf_free(m2);
}




#endif
diff --git a/src/pal/linux/mbuf.h b/src/pal/linux/mbuf.h
new file mode 100755
index 00000000..693b095a
--- /dev/null
+++ b/src/pal/linux/mbuf.h
@@ -0,0 +1,192 @@
+#ifndef MBUF_H
+#define MBUF_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include <stdint.h>
+#include <string.h>
+
+#define MAGIC0 0xAABBCCDD
+#define MAGIC2 0x11223344
+
/* Minimal mempool descriptor for the Linux (non-DPDK) simulation.
 * Objects are not pre-allocated; only sizing metadata is kept and
 * allocations fall through to the heap. */
struct rte_mempool {
    uint32_t magic;     /* guard word, MAGIC0 -- checked by utl_rte_check() */
    uint32_t elt_size;  /* size in bytes of one element (header + buffer) */
    uint32_t magic2;    /* second guard word, MAGIC2 */
    uint32_t _id;       /* pool id -- never assigned by the creators; appears unused */
    int size;           /* configured element count */
};
+
+
+
+
/* Simulated DPDK packet mbuf.  Guard words (magic/magic2) bracket the
 * metadata so utl_rte_pktmbuf_check() can detect corruption. */
struct rte_mbuf {
    uint32_t magic;            /* guard word, MAGIC0 */
    struct rte_mempool *pool;  /**< Pool from which mbuf was allocated. */
    void *buf_addr;            /**< Virtual address of segment buffer. */
    uint16_t buf_len;          /**< Length of segment buffer. */
    uint16_t data_off;         /* offset of valid data from buf_addr */

    struct rte_mbuf *next;     /**< Next segment of scattered packet. */
    uint16_t data_len;         /**< Amount of data in segment buffer. */

    /* these fields are valid for first segment only */
    uint8_t nb_segs;           /**< Number of segments. */
    uint8_t in_port;           /**< Input port. */
    uint32_t pkt_len;          /**< Total pkt len: sum of all segment data_len. */

    uint32_t magic2;           /* second guard word, MAGIC2 */
    uint32_t refcnt_reserved;  /**< Do not use this field */
} ;
+
+
+typedef struct rte_mbuf rte_mbuf_t;
+
+typedef struct rte_mempool rte_mempool_t;
+
+#define RTE_PKTMBUF_HEADROOM 0
+
+rte_mempool_t * utl_rte_mempool_create(const char *name,
+ unsigned n,
+ unsigned elt_size,
+ unsigned cache_size,
+ uint32_t _id ,
+ int socket_id
+ );
+
+rte_mempool_t * utl_rte_mempool_create_non_pkt(const char *name,
+ unsigned n,
+ unsigned elt_size,
+ unsigned cache_size,
+ uint32_t _id ,
+ int socket_id);
+
/* Stub: report how many elements remain in the pool.
 * The Linux simulation allocates from the heap, so there is no real
 * pool occupancy; a fixed non-zero value satisfies callers that only
 * test for "pool not empty".  TODO confirm no caller relies on the
 * exact count. */
inline unsigned rte_mempool_count(rte_mempool_t *mp){
    return (10);
}
+
+
+
+void rte_pktmbuf_free(rte_mbuf_t *m);
+
+rte_mbuf_t *rte_pktmbuf_alloc(rte_mempool_t *mp);
+
+char *rte_pktmbuf_append(rte_mbuf_t *m, uint16_t len);
+
+char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len);
+
+int rte_pktmbuf_trim(rte_mbuf_t *m, uint16_t len);
+
+void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md);
+
+
+
+
+void rte_pktmbuf_free_seg(rte_mbuf_t *m);
+
+uint16_t rte_mbuf_refcnt_update(rte_mbuf_t *m, int16_t value);
+
+rte_mbuf_t * utl_rte_pktmbuf_add_after(rte_mbuf_t *m1,rte_mbuf_t *m2);
+rte_mbuf_t * utl_rte_pktmbuf_add_after2(rte_mbuf_t *m1,rte_mbuf_t *m2);
+
+void rte_pktmbuf_dump(const struct rte_mbuf *m, unsigned dump_len);
+
+
+int rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p);
+
+void rte_mempool_sp_put(struct rte_mempool *mp, void *obj);
+
+inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p){
+ return (rte_mempool_sc_get(mp, obj_p));
+}
+
+inline void rte_mempool_put(struct rte_mempool *mp, void *obj){
+ rte_mempool_sp_put(mp, obj);
+}
+
+
/* Thin PAL shim over libc memcpy; regions must not overlap.
 * Returns dst, like memcpy. */
static inline void *
rte_memcpy(void *dst, const void *src, size_t n)
{
    void *ret = memcpy(dst, src, n);
    return ret;
}
+
+
+
+void rte_exit(int exit_code, const char *format, ...);
+
/* The Linux simulation is single-socket: every lcore maps to socket 0. */
static inline unsigned
rte_lcore_to_socket_id(unsigned lcore_id){
    (void)lcore_id;
    return 0;
}
+
+#define rte_pktmbuf_mtod(m, t) ((t)((char *)(m)->buf_addr + (m)->data_off))
+
+/**
+ * A macro that returns the length of the packet.
+ *
+ * The value can be read or assigned.
+ *
+ * @param m
+ * The packet mbuf.
+ */
+#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
+
+/**
+ * A macro that returns the length of the segment.
+ *
+ * The value can be read or assigned.
+ *
+ * @param m
+ * The packet mbuf.
+ */
+#define rte_pktmbuf_data_len(m) ((m)->data_len)
+
+
+uint64_t rte_rand(void);
+
+
/* Append 'm_last' as the final segment of chain 'm'.
 * Only two chain shapes are supported:
 *   1. 'm' is a single mbuf              -> becomes m -> m_last
 *   2. 'm' has exactly one extra segment -> becomes m -> seg -> m_last
 * NOTE(review): case 2 hard-codes nb_segs = 3 and writes m->next->next,
 * so chains longer than two segments are silently mishandled -- confirm
 * callers never pass longer chains. */
static inline void utl_rte_pktmbuf_add_last(rte_mbuf_t *m,rte_mbuf_t *m_last){

    if ( m->next == NULL ) {
        /* single mbuf: delegate; updates pkt_len and nb_segs too */
        utl_rte_pktmbuf_add_after2(m,m_last);
    }else{
        m->next->next=m_last;
        m->pkt_len += m_last->data_len;
        m->nb_segs = 3;
    }
}
+
+
+
+
+#define __rte_cache_aligned
+
+#define CACHE_LINE_SIZE 64
+
+#define SOCKET_ID_ANY 0
+
+#endif
diff --git a/src/pal/linux/pal_utl.cpp b/src/pal/linux/pal_utl.cpp
new file mode 100755
index 00000000..de864dbd
--- /dev/null
+++ b/src/pal/linux/pal_utl.cpp
@@ -0,0 +1,29 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include "pal_utl.h"
+
+
+
+
+
diff --git a/src/pal/linux/pal_utl.h b/src/pal/linux/pal_utl.h
new file mode 100755
index 00000000..38152850
--- /dev/null
+++ b/src/pal/linux/pal_utl.h
@@ -0,0 +1,70 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#ifndef PAL_UTL_H
+#define PAL_UTL_H
+
+#include <stdint.h>
+
/* Byte-extraction helpers for 16/32-bit values. */
#define PAL_MSB(x) (((x) >> 8) & 0xff)    /* most signif byte of 2-byte integer */
#define PAL_LSB(x) ((x) & 0xff)           /* least signif byte of 2-byte integer*/
#define PAL_MSW(x) (((x) >> 16) & 0xffff) /* most signif word of 2-word integer */
#define PAL_LSW(x) ((x) & 0xffff)         /* least signif word of 2-word integer*/

/* swap the MSW with the LSW of a 32 bit integer */
#define PAL_WORDSWAP(x) (PAL_MSW(x) | (PAL_LSW(x) << 16))

#define PAL_LLSB(x) ((x) & 0xff)          /* 32bit word byte/word swap macros */
#define PAL_LNLSB(x) (((x) >> 8) & 0xff)
#define PAL_LNMSB(x) (((x) >> 16) & 0xff)
#define PAL_LMSB(x) (((x) >> 24) & 0xff)
#define PAL_LONGSWAP(x) ((PAL_LLSB(x) << 24) | \
                         (PAL_LNLSB(x) << 16)| \
                         (PAL_LNMSB(x) << 8) | \
                         (PAL_LMSB(x)))

/* Network <-> host byte-order conversion (host assumed little-endian).
 * Fix: the macro argument is parenthesized before the cast, so
 * expressions such as PAL_NTOHL(a + b) now expand correctly; the
 * original applied the cast to only part of the expression. */
#define PAL_NTOHL(x) ((uint32_t)(PAL_LONGSWAP((uint32_t)(x))))
#define PAL_NTOHS(x) ((uint16_t)(((PAL_MSB((uint16_t)(x)))) | (PAL_LSB((uint16_t)(x)) << 8)))

#define PAL_HTONS(x) (PAL_NTOHS(x))
#define PAL_HTONL(x) (PAL_NTOHL(x))

/* Byte-swap a 64-bit value: swap each 32-bit half, then exchange
 * the halves. */
static inline uint64_t pal_ntohl64(uint64_t x){
    uint32_t high=(uint32_t)((x&0xffffffff00000000ULL)>>32);
    uint32_t low=(uint32_t)((x&0xffffffff));
    uint64_t res=((uint64_t)(PAL_LONGSWAP(low)))<<32 | PAL_LONGSWAP(high);
    return (res);
}

#define PAL_NTOHLL(x) (pal_ntohl64(x))


/* Branch-prediction hints: plain pass-throughs in the Linux simulation. */
#define unlikely(a) (a)
#define likely(a) (a)

/* Spin-wait hint; a no-op in the simulation (no PAUSE instruction). */
static inline void rte_pause (void)
{
}
+
+
+
+#endif
+
+
diff --git a/src/pal/linux/sanb_atomic.h b/src/pal/linux/sanb_atomic.h
new file mode 100755
index 00000000..8d24f8f4
--- /dev/null
+++ b/src/pal/linux/sanb_atomic.h
@@ -0,0 +1,175 @@
+#ifndef SANB_ATOMIC_
+#define SANB_ATOMIC_
+#include <stdlib.h>
+
+#define FORCE_NON_INILINE __attribute__((noinline))
+
/* x86 SMP memory barriers.  All three also act as compiler barriers
 * via the "memory" clobber. */

/* Order all prior stores before any later stores (SFENCE). */
static inline void sanb_smp_store_memory_barrier (void)
{
    asm volatile ("sfence":::"memory");
}

/* Order all prior loads before any later loads (LFENCE). */
static inline void sanb_smp_load_memory_barrier (void)
{
    asm volatile ("lfence":::"memory");
}

/* Full barrier: order all prior loads and stores (MFENCE). */
static inline void sanb_smp_memory_barrier (void)
{
    asm volatile ("mfence":::"memory");
}
+
+
/* Atomic compare-and-swap on a 32-bit word (LOCK CMPXCHG).
 * Stores new_value to *addr only if *addr still equals old_value.
 * Returns true when the swap happened, false otherwise.  The 64-bit
 * variant differs only in using movzbq to widen the flag result. */
static inline bool
sanb_atomic_compare_and_set_32 (uint32_t old_value, uint32_t new_value,
                                volatile uint32_t *addr)
{
    long result;
#if __WORDSIZE == 64 || defined(__x86_64__)
    asm volatile (" lock cmpxchgl %2, 0(%3);" /* do the atomic operation */
                  " sete %b0;" /* on success the ZF=1, copy that to */
                               /* the low order byte of %eax (AKA %al)*/
                  " movzbq %b0, %0;"/* zero extend %al to all of %eax */
                  : "=a" (result)
                  : "0" (old_value), "q" (new_value), "r" (addr)
                  : "memory" );
#else
    asm volatile (" lock cmpxchgl %2, 0(%3);" /* do the atomic operation */
                  " sete %b0;" /* on success the ZF=1, copy that to */
                               /* the low order byte of %eax (AKA %al)*/
                  " movzbl %b0, %0;"/* zero extend %al to all of %eax */
                  : "=a" (result)
                  : "0" (old_value), "q" (new_value), "r" (addr)
                  : "memory" );
#endif
    return (bool)result;
}
+
+
+
+/*
+ * FIXME: on some processors the cmpxchg8b() instruction does not exist. On
+ * those processors this will cause a seg-fault. The only way to implement
+ * this operation on such a processor is to use a global lock.
+ */
/* Atomic compare-and-swap on a 64-bit word.
 * 64-bit builds use LOCK CMPXCHGQ; 32-bit builds fall back to
 * LOCK CMPXCHG8B (see the FIXME above regarding very old CPUs).
 * Returns true when the swap happened. */
static inline bool
sanb_atomic_compare_and_set_64 (uint64_t old_value, uint64_t new_value,
                                volatile void *ptr)
{
    volatile uint64_t *addr = (volatile uint64_t *)ptr;
#if __WORDSIZE == 64 || defined(__x86_64__)
    uint64_t prev;
    asm volatile (" lock cmpxchgq %2, 0(%3);" /* do the atomic operation */
                  : "=a" (prev) /* result will be stored in RAX */
                  : "0" (old_value), "q" (new_value), "r" (addr)
                  : "memory");
    /* CMPXCHG leaves the observed value in RAX; equality means success */
    return (bool) (prev == old_value);
#else
    uint64_t result;
    asm volatile (" movl 0(%2), %%ebx;" /* load ECX:EBX with new_value */
                  " movl 4(%2), %%ecx;"
                  " lock cmpxchg8b 0(%3);" /* do the atomic operation */
                  " sete %b0;\n" /* on success the ZF=1, copy that to */
                                 /* the low order byte of %eax (AKA %al)*/
                  " movzbl %b0, %0;"/* zero extend %al to all of %eax */
                  : "=A" (result) /* result will be stored in EDX:EAX */
                  : "0" (old_value), "r" (&new_value), "r" (addr)
                  : "memory", "ecx", "ebx" );
    return (bool) result;
#endif
}
+
+
/* Atomically add 'increment' to *addr (LOCK ADDL); no return value. */
static inline void
sanb_atomic_add_32 (volatile uint32_t *addr, uint32_t increment)
{
    asm volatile (" lock addl %0, 0(%1);"
                  :
                  : "q" (increment), "r" (addr)
                  : "memory" );
}


/* Atomically subtract 'decrement' from *addr (LOCK SUBL). */
static inline void
sanb_atomic_subtract_32 (volatile uint32_t *addr, uint32_t decrement)
{
    asm volatile (" lock subl %0, 0(%1);"
                  :
                  : "q" (decrement), "r" (addr)
                  : "memory" );
}
+
+
+/*
+ * It is not possible to do an atomic 64 bit add in 32-bit mode. Fortunately
+ * it is possible to do a 64-bit cmpxchg, so we can use that to implement a
+ * 64-bit atomic_add.
+ */
static inline void
sanb_atomic_add_64 (volatile void *ptr, uint64_t increment)
{
    volatile uint64_t *target = (volatile uint64_t *)ptr;
    uint64_t seen;

    /* CAS loop: retry until no concurrent writer raced us. */
    do {
        seen = *target;
    } while (!sanb_atomic_compare_and_set_64(seen, seen + increment, target));
}
+
+
+
/* Atomically decrement a 32-bit counter, saturating at zero (a value
 * of 0 stays 0).  Returns the value observed BEFORE the decrement, so
 * a return of 1 means this caller took the counter to zero. */
static inline uint32_t sanb_atomic_dec2zero32(volatile void *ptr){
    volatile uint32_t *addr = (volatile uint32_t *)ptr;
    uint32_t original;
    do {
        original = *addr;
    } while (!sanb_atomic_compare_and_set_32(original, original ? (original - 1):0, addr));
    return (original);
}
+
+
+
/* Atomic fetch-and-add on 32 bits via LOCK XADD.
 * Returns the value of *addr from BEFORE the addition (hence "_old"). */
static inline uint32_t
sanb_atomic_add_return_32_old (volatile uint32_t *addr, uint32_t increment)
{
    uint32_t original;

    asm volatile (" lock xaddl %1, 0(%2);"
                  : "=r" (original)
                  : "0" (increment), "q" (addr)
                  : "memory" );
    return original ;
}
+
+
static inline uint64_t
sanb_atomic_add_return_64_old (volatile void *ptr, uint64_t increment)
{
    volatile uint64_t *target = (volatile uint64_t *)ptr;
    uint64_t seen;

    /* CAS loop: retry until no concurrent writer raced us.
     * Returns the value observed before the addition. */
    do {
        seen = *target;
    } while (!sanb_atomic_compare_and_set_64(seen, seen + increment, target));
    return seen;
}
+
+
+
/* Read a pointer through a volatile-qualified lvalue, forcing a real
 * load from memory (the compiler may not cache it in a register). */
static inline void*
sanb_mem_read_ptr(void **v) {

    volatile void **slot = (volatile void **)v;
    return ((void *)(*slot));
}

/* Store a pointer through a volatile-qualified lvalue, forcing a real
 * store to memory. */
static inline void
sanb_mem_write_ptr(void **v,void *value) {

    volatile void **slot = (volatile void **)v;
    *slot = value;
}
+
+
+
+#endif