author     Luca Boccassi <luca.boccassi@gmail.com>  2017-11-08 14:15:11 +0000
committer  Luca Boccassi <luca.boccassi@gmail.com>  2017-11-08 14:45:54 +0000
commit     055c52583a2794da8ba1e85a48cce3832372b12f (patch)
tree       8ceb1cb78fbb46a0f341f8ee24feb3c6b5540013 /drivers/bus/fslmc/qbman/include
parent     f239aed5e674965691846e8ce3f187dd47523689 (diff)

New upstream version 17.11-rc3

Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/bus/fslmc/qbman/include')
-rw-r--r--  drivers/bus/fslmc/qbman/include/compat.h            322
-rw-r--r--  drivers/bus/fslmc/qbman/include/fsl_qbman_base.h      4
-rw-r--r--  drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h   183
3 files changed, 136 insertions, 373 deletions
diff --git a/drivers/bus/fslmc/qbman/include/compat.h b/drivers/bus/fslmc/qbman/include/compat.h
index 529f1ea3..423087cb 100644
--- a/drivers/bus/fslmc/qbman/include/compat.h
+++ b/drivers/bus/fslmc/qbman/include/compat.h
@@ -30,32 +30,17 @@
#ifndef HEADER_COMPAT_H
#define HEADER_COMPAT_H
-#include <sched.h>
-
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdint.h>
#include <stdlib.h>
-#include <stddef.h>
#include <errno.h>
#include <string.h>
-#include <pthread.h>
-#include <net/ethernet.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include <ctype.h>
#include <malloc.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
#include <unistd.h>
-#include <sys/mman.h>
-#include <limits.h>
-#include <assert.h>
-#include <dirent.h>
-#include <inttypes.h>
#include <error.h>
+#include <linux/types.h>
#include <rte_atomic.h>
/* The following definitions are primarily to allow the single-source driver
@@ -65,53 +50,11 @@
*/
/* Required compiler attributes */
-#define __user
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
-#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
-#undef container_of
-#define container_of(ptr, type, member) ({ \
- typeof(((type *)0)->member)(*__mptr) = (ptr); \
- (type *)((char *)__mptr - offsetof(type, member)); })
-#define __stringify_1(x) #x
-#define __stringify(x) __stringify_1(x)
-
-#ifdef ARRAY_SIZE
-#undef ARRAY_SIZE
-#endif
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
/* Required types */
-typedef uint8_t u8;
-typedef uint16_t u16;
-typedef uint32_t u32;
-typedef uint64_t u64;
typedef uint64_t dma_addr_t;
-typedef cpu_set_t cpumask_t;
-typedef u32 compat_uptr_t;
-
-static inline void __user *compat_ptr(compat_uptr_t uptr)
-{
- return (void __user *)(unsigned long)uptr;
-}
-
-static inline compat_uptr_t ptr_to_compat(void __user *uptr)
-{
- return (u32)(unsigned long)uptr;
-}
-
-/* I/O operations */
-static inline u32 in_be32(volatile void *__p)
-{
- volatile u32 *p = __p;
- return *p;
-}
-
-static inline void out_be32(volatile void *__p, u32 val)
-{
- volatile u32 *p = __p;
- *p = val;
-}
/* Debugging */
#define prflush(fmt, args...) \
@@ -124,275 +67,46 @@ static inline void out_be32(volatile void *__p, u32 val)
#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
#define pr_info(fmt, args...) prflush(fmt, ##args)
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_BUS
+
+/* Trace the 3 different classes of read/write access to QBMan. #undef as
+ * required.
+ */
+#define QBMAN_CCSR_TRACE
+#define QBMAN_CINH_TRACE
+#define QBMAN_CENA_TRACE
+
+#define QBMAN_CHECKING
+
#ifdef pr_debug
#undef pr_debug
#endif
-#define pr_debug(fmt, args...) {}
-#define might_sleep_if(c) {}
-#define msleep(x) {}
-#define WARN_ON(c, str) \
+#define pr_debug(fmt, args...) printf(fmt, ##args)
+#define QBMAN_BUG_ON(c) \
do { \
static int warned_##__LINE__; \
if ((c) && !warned_##__LINE__) { \
- pr_warn("%s\n", str); \
pr_warn("(%s:%d)\n", __FILE__, __LINE__); \
warned_##__LINE__ = 1; \
} \
} while (0)
-#ifdef CONFIG_BUGON
-#define QBMAN_BUG_ON(c) WARN_ON(c, "BUG")
#else
#define QBMAN_BUG_ON(c) {}
+#define pr_debug(fmt, args...) {}
#endif
-#define ALIGN(x, a) (((x) + ((typeof(x))(a) - 1)) & ~((typeof(x))(a) - 1))
-
-/****************/
-/* Linked-lists */
-/****************/
-
-struct list_head {
- struct list_head *prev;
- struct list_head *next;
-};
-
-#define LIST_HEAD(n) \
-struct list_head n = { \
- .prev = &n, \
- .next = &n \
-}
-
-#define INIT_LIST_HEAD(p) \
-do { \
- struct list_head *__p298 = (p); \
- __p298->next = __p298; \
- __p298->prev = __p298->next; \
-} while (0)
-#define list_entry(node, type, member) \
- (type *)((void *)node - offsetof(type, member))
-#define list_empty(p) \
-({ \
- const struct list_head *__p298 = (p); \
- ((__p298->next == __p298) && (__p298->prev == __p298)); \
-})
-#define list_add(p, l) \
-do { \
- struct list_head *__p298 = (p); \
- struct list_head *__l298 = (l); \
- __p298->next = __l298->next; \
- __p298->prev = __l298; \
- __l298->next->prev = __p298; \
- __l298->next = __p298; \
-} while (0)
-#define list_add_tail(p, l) \
-do { \
- struct list_head *__p298 = (p); \
- struct list_head *__l298 = (l); \
- __p298->prev = __l298->prev; \
- __p298->next = __l298; \
- __l298->prev->next = __p298; \
- __l298->prev = __p298; \
-} while (0)
-#define list_for_each(i, l) \
- for (i = (l)->next; i != (l); i = i->next)
-#define list_for_each_safe(i, j, l) \
- for (i = (l)->next, j = i->next; i != (l); \
- i = j, j = i->next)
-#define list_for_each_entry(i, l, name) \
- for (i = list_entry((l)->next, typeof(*i), name); &i->name != (l); \
- i = list_entry(i->name.next, typeof(*i), name))
-#define list_for_each_entry_safe(i, j, l, name) \
- for (i = list_entry((l)->next, typeof(*i), name), \
- j = list_entry(i->name.next, typeof(*j), name); \
- &i->name != (l); \
- i = j, j = list_entry(j->name.next, typeof(*j), name))
-#define list_del(i) \
-do { \
- (i)->next->prev = (i)->prev; \
- (i)->prev->next = (i)->next; \
-} while (0)
-
/* Other miscellaneous interfaces our APIs depend on; */
-#define lower_32_bits(x) ((u32)(x))
-#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
+#define lower_32_bits(x) ((uint32_t)(x))
+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
-/* Compiler/type stuff */
-typedef unsigned int gfp_t;
-typedef uint32_t phandle;
#define __iomem
-#define EINTR 4
-#define ENODEV 19
-#define GFP_KERNEL 0
+
#define __raw_readb(p) (*(const volatile unsigned char *)(p))
#define __raw_readl(p) (*(const volatile unsigned int *)(p))
#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
-/* memcpy() stuff - when you know alignments in advance */
-#ifdef CONFIG_TRY_BETTER_MEMCPY
-static inline void copy_words(void *dest, const void *src, size_t sz)
-{
- u32 *__dest = dest;
- const u32 *__src = src;
- size_t __sz = sz >> 2;
-
- QBMAN_BUG_ON((unsigned long)dest & 0x3);
- QBMAN_BUG_ON((unsigned long)src & 0x3);
- QBMAN_BUG_ON(sz & 0x3);
- while (__sz--)
- *(__dest++) = *(__src++);
-}
-
-static inline void copy_shorts(void *dest, const void *src, size_t sz)
-{
- u16 *__dest = dest;
- const u16 *__src = src;
- size_t __sz = sz >> 1;
-
- QBMAN_BUG_ON((unsigned long)dest & 0x1);
- QBMAN_BUG_ON((unsigned long)src & 0x1);
- QBMAN_BUG_ON(sz & 0x1);
- while (__sz--)
- *(__dest++) = *(__src++);
-}
-
-static inline void copy_bytes(void *dest, const void *src, size_t sz)
-{
- u8 *__dest = dest;
- const u8 *__src = src;
-
- while (sz--)
- *(__dest++) = *(__src++);
-}
-#else
-#define copy_words memcpy
-#define copy_shorts memcpy
-#define copy_bytes memcpy
-#endif
-
-/* Completion stuff */
-#define DECLARE_COMPLETION(n) int n = 0
-#define complete(n) { *n = 1; }
-#define wait_for_completion(n) \
-do { \
- while (!*n) { \
- bman_poll(); \
- qman_poll(); \
- } \
- *n = 0; \
-} while (0)
-
-/* Allocator stuff */
-#define kmalloc(sz, t) malloc(sz)
-#define vmalloc(sz) malloc(sz)
-#define kfree(p) { if (p) free(p); }
-static inline void *kzalloc(size_t sz, gfp_t __foo __rte_unused)
-{
- void *ptr = malloc(sz);
-
- if (ptr)
- memset(ptr, 0, sz);
- return ptr;
-}
-
-static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)
-{
- void *p;
-
- if (posix_memalign(&p, 4096, 4096))
- return 0;
- memset(p, 0, 4096);
- return (unsigned long)p;
-}
-
-static inline void free_page(unsigned long p)
-{
- free((void *)p);
-}
-
-/* Bitfield stuff. */
-#define BITS_PER_ULONG (sizeof(unsigned long) << 3)
-#define SHIFT_PER_ULONG (((1 << 5) == BITS_PER_ULONG) ? 5 : 6)
-#define BITS_MASK(idx) ((unsigned long)1 << ((idx) & (BITS_PER_ULONG - 1)))
-#define BITS_IDX(idx) ((idx) >> SHIFT_PER_ULONG)
-static inline unsigned long test_bits(unsigned long mask,
- volatile unsigned long *p)
-{
- return *p & mask;
-}
-
-static inline int test_bit(int idx, volatile unsigned long *bits)
-{
- return test_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
-}
-
-static inline void set_bits(unsigned long mask, volatile unsigned long *p)
-{
- *p |= mask;
-}
-
-static inline void set_bit(int idx, volatile unsigned long *bits)
-{
- set_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
-}
-
-static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
-{
- *p &= ~mask;
-}
-
-static inline void clear_bit(int idx, volatile unsigned long *bits)
-{
- clear_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
-}
-
-static inline unsigned long test_and_set_bits(unsigned long mask,
- volatile unsigned long *p)
-{
- unsigned long ret = test_bits(mask, p);
-
- set_bits(mask, p);
- return ret;
-}
-
-static inline int test_and_set_bit(int idx, volatile unsigned long *bits)
-{
- int ret = test_bit(idx, bits);
-
- set_bit(idx, bits);
- return ret;
-}
-
-static inline int test_and_clear_bit(int idx, volatile unsigned long *bits)
-{
- int ret = test_bit(idx, bits);
-
- clear_bit(idx, bits);
- return ret;
-}
-
-static inline int find_next_zero_bit(unsigned long *bits, int limit, int idx)
-{
- while ((++idx < limit) && test_bit(idx, bits))
- ;
- return idx;
-}
-
-static inline int find_first_zero_bit(unsigned long *bits, int limit)
-{
- int idx = 0;
-
- while (test_bit(idx, bits) && (++idx < limit))
- ;
- return idx;
-}
-
-static inline u64 div64_u64(u64 n, u64 d)
-{
- return n / d;
-}
-
#define atomic_t rte_atomic32_t
#define atomic_read(v) rte_atomic32_read(v)
#define atomic_set(v, i) rte_atomic32_set(v, i)
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
index ee4b772c..14159609 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -38,10 +38,6 @@ typedef uint64_t dma_addr_t;
*
*/
-#define QMAN_REV_4000 0x04000000
-#define QMAN_REV_4100 0x04010000
-#define QMAN_REV_4101 0x04010001
-
/**
* struct qbman_block_desc - qbman block descriptor structure
* @ccsr_reg_bar: CCSR register map.
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
index 9e9047e2..1e656602 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
@@ -194,11 +194,38 @@ void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
/**
* struct qbman_result - structure for qbman dequeue response and/or
* notification.
- * @dont_manipulate_directly: the 16 32bit data to represent the whole
+ * @donot_manipulate_directly: the 16 32bit data to represent the whole
* possible qbman dequeue result.
*/
struct qbman_result {
- uint32_t dont_manipulate_directly[16];
+ union {
+ struct common {
+ uint8_t verb;
+ uint8_t reserved[63];
+ } common;
+ struct dq {
+ uint8_t verb;
+ uint8_t stat;
+ __le16 seqnum;
+ __le16 oprid;
+ uint8_t reserved;
+ uint8_t tok;
+ __le32 fqid;
+ uint32_t reserved2;
+ __le32 fq_byte_cnt;
+ __le32 fq_frm_cnt;
+ __le64 fqd_ctx;
+ uint8_t fd[32];
+ } dq;
+ struct scn {
+ uint8_t verb;
+ uint8_t stat;
+ uint8_t state;
+ uint8_t reserved;
+ __le32 rid_tok;
+ __le64 ctx;
+ } scn;
+ };
};
/* TODO:
@@ -254,11 +281,21 @@ void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
/**
* struct qbman_pull_desc - the structure for pull dequeue descriptor
- * @dont_manipulate_directly: the 6 32bit data to represent the whole
- * possible settings for pull dequeue descriptor.
*/
struct qbman_pull_desc {
- uint32_t dont_manipulate_directly[6];
+ union {
+ uint32_t donot_manipulate_directly[16];
+ struct pull {
+ uint8_t verb;
+ uint8_t numf;
+ uint8_t tok;
+ uint8_t reserved;
+ uint32_t dq_src;
+ uint64_t rsp_addr;
+ uint64_t rsp_addr_virt;
+ uint8_t padding[40];
+ } pull;
+ };
};
enum qbman_pull_type_e {
@@ -292,7 +329,7 @@ void qbman_pull_desc_clear(struct qbman_pull_desc *d);
*/
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
struct qbman_result *storage,
- dma_addr_t storage_phys,
+ uint64_t storage_phys,
int stash);
/**
* qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
@@ -415,7 +452,20 @@ struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
* dequeue result.
*/
int qbman_result_has_new_result(struct qbman_swp *s,
- const struct qbman_result *dq);
+ struct qbman_result *dq);
+
+/**
+ * qbman_check_command_complete() - Check if the previous issued dq commnd
+ * is completed and results are available in memory.
+ * @s: the software portal object.
+ * @dq: the dequeue result read from the memory.
+ *
+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
+ * dequeue result.
+ */
+int qbman_check_command_complete(struct qbman_result *dq);
+
+int qbman_check_new_result(struct qbman_result *dq);
/* -------------------------------------------------------- */
/* Parsing dequeue entries (DQRR and user-provided storage) */
@@ -537,7 +587,7 @@ int qbman_result_is_FQPN(const struct qbman_result *dq);
*
* Return the state field.
*/
-uint32_t qbman_result_DQ_flags(const struct qbman_result *dq);
+uint8_t qbman_result_DQ_flags(const struct qbman_result *dq);
/**
* qbman_result_DQ_is_pull() - Check whether the dq response is from a pull
@@ -648,24 +698,6 @@ uint32_t qbman_result_SCN_rid(const struct qbman_result *scn);
*/
uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);
-/**
- * qbman_result_SCN_state_in_mem() - Get the state in notification written
- * in memory
- * @scn: the state change notification.
- *
- * Return the state.
- */
-uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn);
-
-/**
- * qbman_result_SCN_rid_in_mem() - Get the resource id in notification written
- * in memory.
- * @scn: the state change notification.
- *
- * Return the resource id.
- */
-uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn);
-
/* Type-specific "resource IDs". Mainly for illustration purposes, though it
* also gives the appropriate type widths.
*/
@@ -746,22 +778,35 @@ uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
/* Enqueues */
/************/
-/**
- * struct qbman_eq_desc - structure of enqueue descriptor
- * @dont_manipulate_directly: the 8 32bit data to represent the whole
- * possible qbman enqueue setting in enqueue descriptor.
- */
+/* struct qbman_eq_desc - structure of enqueue descriptor */
struct qbman_eq_desc {
- uint32_t dont_manipulate_directly[8];
+ union {
+ uint32_t donot_manipulate_directly[8];
+ struct eq {
+ uint8_t verb;
+ uint8_t dca;
+ uint16_t seqnum;
+ uint16_t orpid;
+ uint16_t reserved1;
+ uint32_t tgtid;
+ uint32_t tag;
+ uint16_t qdbin;
+ uint8_t qpri;
+ uint8_t reserved[3];
+ uint8_t wae;
+ uint8_t rspid;
+ uint64_t rsp_addr;
+ } eq;
+ };
};
/**
* struct qbman_eq_response - structure of enqueue response
- * @dont_manipulate_directly: the 16 32bit data to represent the whole
+ * @donot_manipulate_directly: the 16 32bit data to represent the whole
* enqueue response.
*/
struct qbman_eq_response {
- uint32_t dont_manipulate_directly[16];
+ uint32_t donot_manipulate_directly[16];
};
/**
@@ -801,7 +846,7 @@ void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
* sequeue number.
*/
void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
- uint32_t opr_id, uint32_t seqnum, int incomplete);
+ uint16_t opr_id, uint16_t seqnum, int incomplete);
/**
* qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
@@ -810,8 +855,8 @@ void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
* @opr_id: the order point record id.
* @seqnum: the order restoration sequence number.
*/
-void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
- uint32_t seqnum);
+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
+ uint16_t seqnum);
/**
* qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)
@@ -820,8 +865,8 @@ void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
* @opr_id: the order point record id.
* @seqnum: the order restoration sequence number.
*/
-void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
- uint32_t seqnum);
+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
+ uint16_t seqnum);
/**
* qbman_eq_desc_set_response() - Set the enqueue response info.
* @d: the enqueue descriptor
@@ -835,7 +880,7 @@ void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
* expresses a cache-warming attribute.
*/
void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
- dma_addr_t storage_phys,
+ uint64_t storage_phys,
int stash);
/**
@@ -873,7 +918,7 @@ void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);
* @qd_prio: the queuing destination priority.
*/
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
- uint32_t qd_bin, uint32_t qd_prio);
+ uint16_t qd_bin, uint8_t qd_prio);
/**
* qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
@@ -898,7 +943,7 @@ void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
* being rescheduled.)
*/
void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
- uint32_t dqrr_idx, int park);
+ uint8_t dqrr_idx, int park);
/**
* qbman_swp_enqueue() - Issue an enqueue command.
@@ -914,19 +959,33 @@ void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
const struct qbman_fd *fd);
/**
- * qbman_swp_enqueue_multiple_eqdesc() - Enqueue multiple frames with separte
- * enqueue descriptors.
+ * qbman_swp_enqueue_multiple() - Enqueue multiple frames with same
+ eq descriptor
* @s: the software portal used for enqueue.
- * @d: the enqueue descriptors
+ * @d: the enqueue descriptor.
* @fd: the frame descriptor to be enqueued.
* @num_frames: the number of the frames to be enqueued.
*
* Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
*/
-int qbman_swp_enqueue_multiple_eqdesc(struct qbman_swp *s,
+int qbman_swp_enqueue_multiple(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd,
int num_frames);
+/**
+ * qbman_swp_enqueue_multiple_desc() - Enqueue multiple frames with
+ * individual eq descriptor.
+ * @s: the software portal used for enqueue.
+ * @d: the enqueue descriptor.
+ * @fd: the frame descriptor to be enqueued.
+ * @num_frames: the number of the frames to be enqueued.
+ *
+ * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames);
/* TODO:
* qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
@@ -943,11 +1002,20 @@ int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
/*******************/
/**
* struct qbman_release_desc - The structure for buffer release descriptor
- * @dont_manipulate_directly: the 32bit data to represent the whole
+ * @donot_manipulate_directly: the 32bit data to represent the whole
* possible settings of qbman release descriptor.
*/
struct qbman_release_desc {
- uint32_t dont_manipulate_directly[1];
+ union {
+ uint32_t donot_manipulate_directly[16];
+ struct br {
+ uint8_t verb;
+ uint8_t reserved;
+ uint16_t bpid;
+ uint32_t reserved2;
+ uint64_t buf[7];
+ } br;
+ };
};
/**
@@ -961,7 +1029,7 @@ void qbman_release_desc_clear(struct qbman_release_desc *d);
* qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
* @d: the qbman release descriptor.
*/
-void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid);
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid);
/**
* qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
@@ -1004,7 +1072,7 @@ int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);
* Return 0 for success, or negative error code if the acquire command
* fails.
*/
-int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
+int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
unsigned int num_buffers);
/*****************/
@@ -1119,19 +1187,4 @@ int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);
*/
int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
uint64_t ctx);
-int qbman_swp_fill_ring(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd,
- uint8_t burst_index);
-int qbman_swp_flush_ring(struct qbman_swp *s);
-void qbman_sync(void);
-int qbman_swp_send_multiple(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd,
- int frames_to_send);
-
-int qbman_check_command_complete(struct qbman_swp *s,
- const struct qbman_result *dq);
-
-int qbman_get_version(void);
#endif /* !_FSL_QBMAN_PORTAL_H */
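
For context, a minimal caller-side sketch of the renamed burst-enqueue API documented above (qbman_swp_enqueue_multiple(), which replaces qbman_swp_enqueue_multiple_eqdesc()). This is illustrative only and not part of the commit; the portal object, frame queue ID, and frame descriptors are assumed to have been set up elsewhere through the fslmc/qbman APIs.

/* Illustrative sketch, not part of the upstream diff. */
#include <fsl_qbman_portal.h>

static int send_burst(struct qbman_swp *swp, uint32_t fqid,
		      const struct qbman_fd *fds, int nb_fds)
{
	struct qbman_eq_desc eqdesc;

	/* One enqueue descriptor shared by the whole burst. */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, 0);	/* no order restoration, no response */
	qbman_eq_desc_set_fq(&eqdesc, fqid);	/* target frame queue */

	/* Returns the number of frames accepted, or -EBUSY if the EQCR is not ready. */
	return qbman_swp_enqueue_multiple(swp, &eqdesc, fds, nb_fds);
}

When each frame needs its own settings (e.g. different target queues), the companion qbman_swp_enqueue_multiple_desc() added in this change takes an array of enqueue descriptors instead of a single shared one.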