[PATCH v5 net-next 27/36] mlx5e: make preparation in TLS code for NVMEoTCP CRC Tx offload

Boris Pismenny borisp at nvidia.com
Thu Jul 22 04:03:16 PDT 2021


From: Yoray Zack <yorayz at nvidia.com>

  NVMEoTCP CRC Tx offload is similar to TLS Tx offload and also uses
  DUMP WQEs.

  To avoid duplicating functions, the following changes are made:

  1. Add a DUMP_WQE.type field (TLS or NVMEoTCP).
  2. Extend mlx5e_ktls_tx_handle_resync_dump_comp() so it also handles
     NVMEoTCP Tx DUMP WQEs.
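
  For illustration only (not part of this patch): a later NVMEoTCP Tx
  patch would be expected to fill the DUMP WQE info much like the TLS
  path does in tx_fill_wi(), only tagging it with the new NVMEOTCP
  type so the shared completion handler can tell the offloads apart.
  A hypothetical sketch of such a helper:

	/* hypothetical sketch, mirroring tx_fill_wi() in ktls_tx.c */
	static void mlx5e_nvmeotcp_tx_fill_wi(struct mlx5e_txqsq *sq, u16 pi,
					      u8 num_wqebbs, u32 num_bytes,
					      struct page *page)
	{
		struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

		*wi = (struct mlx5e_tx_wqe_info) {
			.num_wqebbs = num_wqebbs,
			.num_bytes  = num_bytes,
			.resync_dump_frag_page = page,
			.type       = MLX5E_DUMP_WQE_NVMEOTCP,
		};
	}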

Signed-off-by: Yoray Zack <yorayz at nvidia.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h    |  5 +++++
 .../ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c   | 12 ++++++++++--
 2 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index f0190ee6e42c..c7f979dfdd69 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -77,6 +77,10 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
+enum mlx5e_dump_wqe_type {
+	MLX5E_DUMP_WQE_TLS,
+	MLX5E_DUMP_WQE_NVMEOTCP,
+};
 
 static inline bool
 mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
@@ -140,6 +144,7 @@ struct mlx5e_tx_wqe_info {
 	u8 num_fifo_pkts;
 #ifdef CONFIG_MLX5_EN_TLS
 	struct page *resync_dump_frag_page;
+	enum mlx5e_dump_wqe_type type;
 #endif
 };
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 9ad3459fb63a..64780d0143ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -154,6 +154,7 @@ static void tx_fill_wi(struct mlx5e_txqsq *sq,
 		.num_wqebbs = num_wqebbs,
 		.num_bytes  = num_bytes,
 		.resync_dump_frag_page = page,
+		.type = MLX5E_DUMP_WQE_TLS,
 	};
 }
 
@@ -358,8 +359,15 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
 
 	mlx5e_tx_dma_unmap(sq->pdev, dma);
 	put_page(wi->resync_dump_frag_page);
-	stats->tls_dump_packets++;
-	stats->tls_dump_bytes += wi->num_bytes;
+
+	switch (wi->type) {
+	case MLX5E_DUMP_WQE_TLS:
+		stats->tls_dump_packets++;
+		stats->tls_dump_bytes += wi->num_bytes;
+		break;
+	case MLX5E_DUMP_WQE_NVMEOTCP:
+		break;
+	}
 }
 
 static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
-- 
2.24.1



