author | Nathan Brown <nathan.brown@arm.com> | 2022-12-30 20:04:39 +0000 |
---|---|---|
committer | Benoît Ganne <bganne@cisco.com> | 2023-02-13 15:36:28 +0000 |
commit | 844a0e8b07010ccdd5af211b91d022aa8d43c934 (patch) | |
tree | 6d53b86703a57e9f42a471d0c44a5cd0f6d23c17 /src | |
parent | 1d998b7c27e48f71a5f3edfaf182abbcc88eca07 (diff) | |
rdma: always use 64 byte CQEs for MLX5
When the DPDK MLX PMDs are built and the DPDK plugin is loaded, DPDK may
set the MLX5_CQE_SIZE environment variable to 128. This prevents the RDMA
plugin from creating completion queues. Since the RDMA plugin expects
64-byte CQEs, set cqe_size explicitly when creating the CQ. This avoids
any issues with differing values of the MLX5_CQE_SIZE environment variable.
Type: improvement
Signed-off-by: Nathan Brown <nathan.brown@arm.com>
Change-Id: Idfd078d3045a4dcb674325ef36f85a89df6fbebc
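
For readers who want the core of the change in isolation, here is a minimal, hedged sketch of creating a CQ with an explicitly requested 64-byte CQE size through rdma-core's mlx5dv extended API, so the result does not depend on the MLX5_CQE_SIZE environment variable. The helper name create_cq_64b_cqe, the bare error handling, and the standalone framing are illustrative assumptions, not part of the patch.

```c
/* Sketch: create a completion queue whose CQE size is pinned to 64 bytes
 * via mlx5dv_create_cq (), so the mlx5 provider does not fall back to the
 * MLX5_CQE_SIZE environment variable. Assumes an already-opened
 * struct ibv_context (e.g. from ibv_open_device ()). */
#include <stdint.h>
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

static struct ibv_cq *
create_cq_64b_cqe (struct ibv_context *ctx, uint32_t n_desc)
{
  struct ibv_cq_init_attr_ex cqa = { .cqe = n_desc };
  struct mlx5dv_cq_init_attr dvcq = { 0 };
  struct ibv_cq_ex *cqex;

  /* Ask for both compressed CQEs and an explicit CQE size. */
  dvcq.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE |
		   MLX5DV_CQ_INIT_ATTR_MASK_CQE_SIZE;
  dvcq.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
  dvcq.cqe_size = 64;	/* always 64-byte CQEs, regardless of MLX5_CQE_SIZE */

  cqex = mlx5dv_create_cq (ctx, &cqa, &dvcq);
  if (cqex == NULL)
    return NULL;	/* caller decides how to report the failure */

  return ibv_cq_ex_to_cq (cqex);
}
```

This mirrors what the patch does in both the rx and tx queue init paths below; the plain ibv_create_cq () path is kept for non-mlx5dv devices.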
Diffstat (limited to 'src')
-rw-r--r-- | src/plugins/rdma/device.c | 28 |
1 file changed, 23 insertions, 5 deletions
diff --git a/src/plugins/rdma/device.c b/src/plugins/rdma/device.c
index 3fdc399a4e3..554304c1daf 100644
--- a/src/plugins/rdma/device.c
+++ b/src/plugins/rdma/device.c
@@ -445,9 +445,10 @@ rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc,
   if (is_mlx5dv)
     {
       struct mlx5dv_cq_init_attr dvcq = { };
-      dvcq.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+      dvcq.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE |
+		       MLX5DV_CQ_INIT_ATTR_MASK_CQE_SIZE;
       dvcq.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
-
+      dvcq.cqe_size = 64;
       if ((cqex = mlx5dv_create_cq (rd->ctx, &cqa, &dvcq)) == 0)
 	return clib_error_return_unix (0, "Create mlx5dv rx CQ Failed");
     }
@@ -717,15 +718,32 @@ rdma_txq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc)
   struct ibv_qp_init_attr qpia;
   struct ibv_qp_attr qpa;
   int qp_flags;
+  int is_mlx5dv = !!(rd->flags & RDMA_DEVICE_F_MLX5DV);
 
   vec_validate_aligned (rd->txqs, qid, CLIB_CACHE_LINE_BYTES);
   txq = vec_elt_at_index (rd->txqs, qid);
   ASSERT (is_pow2 (n_desc));
   txq->bufs_log2sz = min_log2 (n_desc);
   vec_validate_aligned (txq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES);
-
-  if ((txq->cq = ibv_create_cq (rd->ctx, n_desc, NULL, NULL, 0)) == 0)
-    return clib_error_return_unix (0, "Create CQ Failed");
+  if (is_mlx5dv)
+    {
+      struct ibv_cq_init_attr_ex cqa = {};
+      struct ibv_cq_ex *cqex;
+      struct mlx5dv_cq_init_attr dvcq = {};
+      dvcq.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE |
+		       MLX5DV_CQ_INIT_ATTR_MASK_CQE_SIZE;
+      dvcq.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+      dvcq.cqe_size = 64;
+      cqa.cqe = n_desc;
+      if ((cqex = mlx5dv_create_cq (rd->ctx, &cqa, &dvcq)) == 0)
+	return clib_error_return_unix (0, "Create mlx5dv tx CQ Failed");
+      txq->cq = ibv_cq_ex_to_cq (cqex);
+    }
+  else
+    {
+      if ((txq->cq = ibv_create_cq (rd->ctx, n_desc, NULL, NULL, 0)) == 0)
+	return clib_error_return_unix (0, "Create CQ Failed");
+    }
 
   memset (&qpia, 0, sizeof (qpia));
   qpia.send_cq = txq->cq;
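
If the interaction described in the commit message needs to be confirmed while debugging (i.e. whether the DPDK mlx5 PMD has exported MLX5_CQE_SIZE into the running process), a quick check from inside the process is enough. This tiny standalone sketch is illustrative only and not part of the change.

```c
/* Illustrative check: report whether MLX5_CQE_SIZE is set in this
 * process' environment, as described in the commit message above. */
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  const char *cqe_size = getenv ("MLX5_CQE_SIZE");
  printf ("MLX5_CQE_SIZE=%s\n", cqe_size ? cqe_size : "(unset)");
  return 0;
}
```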