@@ -844,6 +844,7 @@ mlx5e_nvmeotcp_dev_resync(struct net_device *netdev,
struct mlx5e_nvmeotcp_queue *queue =
(struct mlx5e_nvmeotcp_queue *)tcp_ddp_get_ctx(sk);
+ queue->after_resync_cqe = 1;
mlx5e_nvmeotcp_rx_post_static_params_wqe(queue, seq);
}
@@ -50,6 +50,7 @@ struct mlx5e_nvmeotcp_sq {
* @ccoff_inner: Current offset within the @ccsglidx element
* @priv: mlx5e netdev priv
* @inv_done: invalidate callback of the nvme tcp driver
+ * @after_resync_cqe: indicates whether a resync occurred in the previous cqe
*/
struct mlx5e_nvmeotcp_queue {
struct tcp_ddp_ctx tcp_ddp_ctx;
@@ -82,6 +83,8 @@ struct mlx5e_nvmeotcp_queue {
/* for flow_steering flow */
struct completion done;
+ /* for masking the HW crcvalid bit on the cqe following a resync */
+ bool after_resync_cqe;
};
struct mlx5e_nvmeotcp {
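
Note: the flag added above is per-queue state with a short lifetime: the resync callback (first hunk) arms it, and the very next completion consumes it (next hunk). The sketch below only condenses that pairing into one place; the two helper names are invented, while mlx5e_nvmeotcp_rx_post_static_params_wqe(), cqe_is_nvmeotcp_crcvalid() and skb->ddp_crc (from the CONFIG_TCP_DDP_CRC patches of this series) are existing pieces.

#include <linux/skbuff.h>

/* Illustrative only: the after_resync_cqe protocol in one place. */
static void nvmeotcp_queue_note_resync(struct mlx5e_nvmeotcp_queue *queue,
				       u32 seq)
{
	queue->after_resync_cqe = 1;	/* next crcvalid bit is suspect */
	mlx5e_nvmeotcp_rx_post_static_params_wqe(queue, seq);
}

static void nvmeotcp_queue_set_skb_crc(struct mlx5e_nvmeotcp_queue *queue,
				       struct sk_buff *skb,
				       struct mlx5_cqe64 *cqe)
{
	/* Mask the HW crcvalid bit on the first CQE after a resync */
	skb->ddp_crc = queue->after_resync_cqe ? 0 :
		       cqe_is_nvmeotcp_crcvalid(cqe);
	queue->after_resync_cqe = 0;
}
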
@@ -175,6 +175,20 @@ mlx5e_nvmeotcp_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
return skb;
}
+#ifdef CONFIG_TCP_DDP_CRC
+ /* If a resync occurred in the previous cqe,
+ * the current cqe.crcvalid bit may not be valid,
+ * so we will treat it as 0
+ */
+ skb->ddp_crc = queue->after_resync_cqe ? 0 :
+ cqe_is_nvmeotcp_crcvalid(cqe);
+ queue->after_resync_cqe = 0;
+#endif
+ if (!cqe_is_nvmeotcp_zc(cqe)) {
+ mlx5e_nvmeotcp_put_queue(queue);
+ return skb;
+ }
+
stats = priv->channels.c[queue->channel_ix]->rq.stats;
/* cc ddp from cqe */
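
Note: on the consumer side, the skb->ddp_crc bit set in the handler above is what lets the ULP skip software digest verification: 1 means the NIC already validated the NVMe/TCP data digest for this skb's payload, 0 (including the first CQE after a resync, masked above) means verify in software. A minimal sketch of that contract, with a hypothetical helper name; the actual nvme-tcp consumer lives in other patches of this series.

#include <linux/skbuff.h>

/* Hypothetical helper, illustrating the contract only. */
static bool rx_skb_ddgst_offloaded(const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_DDP_CRC
	/* set by mlx5e_nvmeotcp_handle_rx_skb() for offloaded queues */
	return skb->ddp_crc;
#else
	return false;
#endif
}
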
@@ -1079,10 +1079,6 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
-#if defined(CONFIG_TCP_DDP_CRC) && defined(CONFIG_MLX5_EN_NVMEOTCP)
- skb->ddp_crc = cqe_is_nvmeotcp_crcvalid(cqe);
-#endif
-
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
@@ -1197,7 +1193,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
page_ref_inc(di->page);
#if defined(CONFIG_TCP_DDP) && defined(CONFIG_MLX5_EN_NVMEOTCP)
- if (cqe_is_nvmeotcp_zc_or_resync(cqe))
+ if (cqe_is_nvmeotcp(cqe))
skb = mlx5e_nvmeotcp_handle_rx_skb(rq->netdev, skb, cqe,
cqe_bcnt, true);
#endif
@@ -1253,7 +1249,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
skb->len += headlen;
#if defined(CONFIG_TCP_DDP) && defined(CONFIG_MLX5_EN_NVMEOTCP)
- if (cqe_is_nvmeotcp_zc_or_resync(cqe))
+ if (cqe_is_nvmeotcp(cqe))
skb = mlx5e_nvmeotcp_handle_rx_skb(rq->netdev, skb, cqe,
cqe_bcnt, false);
#endif
@@ -1486,7 +1482,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
skb->len += headlen;
#if defined(CONFIG_TCP_DDP) && defined(CONFIG_MLX5_EN_NVMEOTCP)
- if (cqe_is_nvmeotcp_zc_or_resync(cqe))
+ if (cqe_is_nvmeotcp(cqe))
skb = mlx5e_nvmeotcp_handle_rx_skb(rq->netdev, skb, cqe,
cqe_bcnt, false);
#endif
@@ -1539,7 +1535,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
page_ref_inc(di->page);
#if defined(CONFIG_TCP_DDP) && defined(CONFIG_MLX5_EN_NVMEOTCP)
- if (cqe_is_nvmeotcp_zc_or_resync(cqe))
+ if (cqe_is_nvmeotcp(cqe))
skb = mlx5e_nvmeotcp_handle_rx_skb(rq->netdev, skb, cqe,
cqe_bcnt, true);
#endif
@@ -882,9 +882,9 @@ static inline bool cqe_is_nvmeotcp_zc(struct mlx5_cqe64 *cqe)
return ((cqe->nvmetcp >> 4) & 0x1);
}
-static inline bool cqe_is_nvmeotcp_zc_or_resync(struct mlx5_cqe64 *cqe)
+static inline bool cqe_is_nvmeotcp(struct mlx5_cqe64 *cqe)
{
- return ((cqe->nvmetcp >> 4) & 0x5);
+ return ((cqe->nvmetcp >> 4) & 0x7);
}
static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
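
Note: the widened mask is what makes crcvalid-only completions reach mlx5e_nvmeotcp_handle_rx_skb() at all. Reading cqe->nvmetcp (a field added earlier in this series) as flags starting at bit 4, the layout implied by the masks is: bit 4 = zero-copy, bit 6 = resync (together the old 0x5 "zc or resync" mask), leaving bit 5 for crcvalid. A sketch of the predicates under that inferred layout; cqe_is_nvmeotcp_crcvalid() exists in the driver, but its body below is reconstructed, not quoted.

/* Inferred nvmetcp flag layout; the crcvalid/resync positions are
 * derived from the 0x5 ("zc or resync") and 0x7 masks in this patch.
 */
static inline bool sketch_cqe_is_nvmeotcp_crcvalid(struct mlx5_cqe64 *cqe)
{
	return (cqe->nvmetcp >> 5) & 0x1;	/* inferred position */
}

static inline bool sketch_cqe_is_nvmeotcp_resync(struct mlx5_cqe64 *cqe)
{
	return (cqe->nvmetcp >> 6) & 0x1;	/* inferred position */
}

/* 0x5 caught only zc | resync completions; 0x7 also catches
 * crcvalid-only ones, so every CQE of an offloaded queue is steered
 * into the nvmeotcp rx handler, where skb->ddp_crc gets set.
 */
static inline bool sketch_cqe_is_nvmeotcp(struct mlx5_cqe64 *cqe)
{
	return (cqe->nvmetcp >> 4) & 0x7;
}
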