net/mlx5: replenish MPRQ buffers for miniCQEs
Keep unzipping if the next CQE is a miniCQE array in the
rxq_cq_decompress_v() routine only for the non-MPRQ scenario;
MPRQ requires buffer replenishment between the miniCQE arrays.

Restore the check for the initial compressed CQE for SPRQ
and check that the current CQE is not compressed before
copying it as a possible title CQE.

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
aleks-kozyrev authored and raslandarawsheh committed Jul 22, 2024
1 parent 95c0275 commit 3da25b1
Showing 4 changed files with 54 additions and 20 deletions.
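
The behavioral core of the change is a new "keep" flag on rxq_cq_decompress_v(): the single-stride (SPRQ) burst path may keep unzipping across back-to-back miniCQE arrays, while the multi-packet (MPRQ) burst path must stop after each array so buffers can be replenished first. A minimal sketch of the two call sites as changed in the diff below:

	/* SPRQ burst: chaining into the next miniCQE array is safe. */
	rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
						&elts[nocmp_n], true);

	/* MPRQ burst: stop after one miniCQE array; MPRQ buffers must be
	 * replenished before the next array can be unzipped.
	 */
	rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
						&elts[nocmp_n], false);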
drivers/net/mlx5/mlx5_rxtx_vec.c (56 changes: 42 additions & 14 deletions)

@@ -331,6 +331,15 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	}
 	/* At this point, there shouldn't be any remaining packets. */
 	MLX5_ASSERT(rxq->decompressed == 0);
+	/* Go directly to unzipping in case the first CQE is compressed. */
+	if (rxq->cqe_comp_layout) {
+		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
+		if (ret == MLX5_CQE_STATUS_SW_OWN &&
+		    (MLX5_CQE_FORMAT(cq->op_own) == MLX5_COMPRESSED)) {
+			comp_idx = 0;
+			goto decompress;
+		}
+	}
 	/* Process all the CQEs */
 	nocmp_n = rxq_cq_process_v(rxq, cq, elts, pkts, pkts_n, err, &comp_idx);
 	/* If no new CQE seen, return without updating cq_db. */
@@ -345,18 +354,23 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	rcvd_pkt += nocmp_n;
 	/* Copy title packet for future compressed sessions. */
 	if (rxq->cqe_comp_layout) {
-		next = &(*rxq->cqes)[rxq->cq_ci & q_mask];
-		ret = check_cqe_iteration(next, rxq->cqe_n, rxq->cq_ci);
-		if (ret != MLX5_CQE_STATUS_SW_OWN ||
-		    MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED)
-			rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
-				   sizeof(struct rte_mbuf));
+		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
+		if (ret == MLX5_CQE_STATUS_SW_OWN &&
+		    (MLX5_CQE_FORMAT(cq->op_own) != MLX5_COMPRESSED)) {
+			next = &(*rxq->cqes)[rxq->cq_ci & q_mask];
+			ret = check_cqe_iteration(next, rxq->cqe_n, rxq->cq_ci);
+			if (MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED ||
+			    ret != MLX5_CQE_STATUS_SW_OWN)
+				rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
+					   sizeof(struct rte_mbuf));
+		}
 	}
+decompress:
 	/* Decompress the last CQE if compressed. */
 	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
 		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
 		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
-							&elts[nocmp_n]);
+							&elts[nocmp_n], true);
 		rxq->cq_ci += rxq->decompressed;
 		/* Return more packets if needed. */
 		if (nocmp_n < pkts_n) {
@@ -482,6 +496,15 @@ rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	}
 	/* At this point, there shouldn't be any remaining packets. */
 	MLX5_ASSERT(rxq->decompressed == 0);
+	/* Go directly to unzipping in case the first CQE is compressed. */
+	if (rxq->cqe_comp_layout) {
+		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
+		if (ret == MLX5_CQE_STATUS_SW_OWN &&
+		    (MLX5_CQE_FORMAT(cq->op_own) == MLX5_COMPRESSED)) {
+			comp_idx = 0;
+			goto decompress;
+		}
+	}
 	/* Process all the CQEs */
 	nocmp_n = rxq_cq_process_v(rxq, cq, elts, pkts, pkts_n, err, &comp_idx);
 	/* If no new CQE seen, return without updating cq_db. */
@@ -495,18 +518,23 @@ rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	rcvd_pkt += cp_pkt;
 	/* Copy title packet for future compressed sessions. */
 	if (rxq->cqe_comp_layout) {
-		next = &(*rxq->cqes)[rxq->cq_ci & q_mask];
-		ret = check_cqe_iteration(next, rxq->cqe_n, rxq->cq_ci);
-		if (ret != MLX5_CQE_STATUS_SW_OWN ||
-		    MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED)
-			rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
-				   sizeof(struct rte_mbuf));
+		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
+		if (ret == MLX5_CQE_STATUS_SW_OWN &&
+		    (MLX5_CQE_FORMAT(cq->op_own) != MLX5_COMPRESSED)) {
+			next = &(*rxq->cqes)[rxq->cq_ci & q_mask];
+			ret = check_cqe_iteration(next, rxq->cqe_n, rxq->cq_ci);
+			if (MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED ||
+			    ret != MLX5_CQE_STATUS_SW_OWN)
+				rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
+					   sizeof(struct rte_mbuf));
+		}
 	}
+decompress:
 	/* Decompress the last CQE if compressed. */
 	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
 		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
 		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
-							&elts[nocmp_n]);
+							&elts[nocmp_n], false);
 		/* Return more packets if needed. */
 		if (nocmp_n < pkts_n) {
 			uint16_t n = rxq->decompressed;
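
Taken together, the hunks above restore the SPRQ fast path for an initially compressed CQE and tighten the title-packet copy so a CQE that is itself compressed is never used as a title candidate. A condensed paraphrase of the early exit (not the literal code, which is shown in the first hunk):

	if (rxq->cqe_comp_layout &&
	    check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci) == MLX5_CQE_STATUS_SW_OWN &&
	    MLX5_CQE_FORMAT(cq->op_own) == MLX5_COMPRESSED) {
		comp_idx = 0;		/* the very first CQE is compressed */
		goto decompress;	/* skip rxq_cq_process_v() and the title copy */
	}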

drivers/net/mlx5/mlx5_rxtx_vec_altivec.h (6 changes: 4 additions & 2 deletions)

@@ -68,13 +68,15 @@ rxq_copy_mbuf_v(struct rte_mbuf **elts, struct rte_mbuf **pkts, uint16_t n)
  * @param elts
  *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
  *   the title completion descriptor to be copied to the rest of mbufs.
+ * @param keep
+ *   Keep unzipping if the next CQE is the miniCQE array.
  *
  * @return
  *   Number of mini-CQEs successfully decompressed.
  */
 static inline uint16_t
 rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
-		    struct rte_mbuf **elts)
+		    struct rte_mbuf **elts, bool keep)
 {
 	volatile struct mlx5_mini_cqe8 *mcq =
 		(void *)&(cq + !rxq->cqe_comp_layout)->pkt_info;
@@ -507,7 +509,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 		}
 	}
 
-	if (rxq->cqe_comp_layout) {
+	if (rxq->cqe_comp_layout && keep) {
 		int ret;
 		/* Keep unzipping if the next CQE is the miniCQE array. */
 		cq = &cq[mcqe_n];
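
The gate added here is what makes the MPRQ caller safe: with keep == false the routine returns after a single miniCQE array instead of chaining into the next one, giving the burst loop a chance to replenish MPRQ buffers. A sketch of the gated tail; the continuation past cq = &cq[mcqe_n]; is truncated in the hunk above, so the peek-and-restart shown is an assumption modeled on the ownership/format checks used elsewhere in this commit:

	if (rxq->cqe_comp_layout && keep) {
		int ret;

		/* Keep unzipping if the next CQE is the miniCQE array. */
		cq = &cq[mcqe_n];
		/* Assumed continuation (truncated in the hunk): peek at the
		 * next CQE and restart only when it is SW-owned and compressed.
		 */
		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
		if (ret == MLX5_CQE_STATUS_SW_OWN &&
		    MLX5_CQE_FORMAT(cq->op_own) == MLX5_COMPRESSED) {
			/* ... unzip the next miniCQE array ... */
		}
	}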

drivers/net/mlx5/mlx5_rxtx_vec_neon.h (6 changes: 4 additions & 2 deletions)

@@ -63,13 +63,15 @@ rxq_copy_mbuf_v(struct rte_mbuf **elts, struct rte_mbuf **pkts, uint16_t n)
  * @param elts
  *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
  *   the title completion descriptor to be copied to the rest of mbufs.
+ * @param keep
+ *   Keep unzipping if the next CQE is the miniCQE array.
  *
  * @return
  *   Number of mini-CQEs successfully decompressed.
  */
 static inline uint16_t
 rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
-		    struct rte_mbuf **elts)
+		    struct rte_mbuf **elts, bool keep)
 {
 	volatile struct mlx5_mini_cqe8 *mcq =
 		(void *)&(cq + !rxq->cqe_comp_layout)->pkt_info;
@@ -372,7 +374,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 			}
 		}
 	}
-	if (rxq->cqe_comp_layout) {
+	if (rxq->cqe_comp_layout && keep) {
 		int ret;
 		/* Keep unzipping if the next CQE is the miniCQE array. */
 		cq = &cq[mcqe_n];

drivers/net/mlx5/mlx5_rxtx_vec_sse.h (6 changes: 4 additions & 2 deletions)

@@ -65,13 +65,15 @@ rxq_copy_mbuf_v(struct rte_mbuf **elts, struct rte_mbuf **pkts, uint16_t n)
  * @param elts
  *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
  *   the title completion descriptor to be copied to the rest of mbufs.
+ * @param keep
+ *   Keep unzipping if the next CQE is the miniCQE array.
  *
  * @return
  *   Number of mini-CQEs successfully decompressed.
  */
 static inline uint16_t
 rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
-		    struct rte_mbuf **elts)
+		    struct rte_mbuf **elts, bool keep)
 {
 	volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + !rxq->cqe_comp_layout);
 	/* Title packet is pre-built. */
@@ -361,7 +363,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 			}
 		}
 	}
-	if (rxq->cqe_comp_layout) {
+	if (rxq->cqe_comp_layout && keep) {
 		int ret;
 		/* Keep unzipping if the next CQE is the miniCQE array. */
 		cq = &cq[mcqe_n];