[PATCH v5 net-next 31/36] net/mlx5e: NVMEoTCP DDGST Tx offload queue init/teardown

Boris Pismenny borisp at nvidia.com
Thu Jul 22 04:03:20 PDT 2021


From: Yoray Zack <yorayz at nvidia.com>

This commit adds support for DDGST Tx offload in the
mlx5e_nvmeotcp_queue_init/teardown functions. If enabled,
mlx5e_nvmeotcp_queue_init calls mlx5e_nvmeotcp_queue_tx_init to handle the
Tx offload initialization.

For Tx offload, the mlx5e NVMEoTCP queue init is responsible for:
- Creating a separate TIS to identify the queue and maintain the HW context.
- Updating the ulp_ddp_ctx params.

(A sketch of how the xmit path picks up this state follows after the
sign-off.)

Signed-off-by: Yoray Zack <yorayz at nvidia.com>
---
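
Note (illustration only, not code taken from this series): the
smp_store_release() of sk->sk_validate_xmit_skb in
mlx5e_nvmeotcp_queue_tx_init() below is what makes the core Tx path hand
skbs on this socket back to the driver through ulp_ddp_validate_xmit_skb().
The xmit-side check named in the patch comment
(mlx5e_nvmeotcp_is_sk_tx_device_offloaded) could be modeled on the existing
tls_is_sk_tx_device_offloaded() helper; the header location and the body
below are assumptions, and the actual implementation in this series may
differ:

#include <net/sock.h>		/* sk_fullsock(), sk_validate_xmit_skb member */
#include <net/ulp_ddp.h>	/* assumed location of ulp_ddp_validate_xmit_skb() */

static inline bool
mlx5e_nvmeotcp_is_sk_tx_device_offloaded(struct sock *sk)
{
	/* Pairs with the smp_store_release() in
	 * mlx5e_nvmeotcp_queue_tx_init(): once the callback is visible,
	 * queue->ulp_ddp_ctx is fully initialized and may safely be
	 * dereferenced by the netdev xmit path.
	 * sk_validate_xmit_skb exists only with CONFIG_SOCK_VALIDATE_XMIT.
	 */
	return sk_fullsock(sk) &&
	       smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &ulp_ddp_validate_xmit_skb;
}
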
 .../mellanox/mlx5/core/en_accel/nvmeotcp.c    | 47 +++++++++++++++++++
 .../mellanox/mlx5/core/en_accel/nvmeotcp.h    | 12 ++++-
 2 files changed, 58 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
index d42f346ac8f5..6023e1ae7be4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
@@ -669,6 +669,36 @@ mlx5e_nvmeotcp_queue_rx_init(struct mlx5e_nvmeotcp_queue *queue,
 	return err;
 }
 
+static int
+mlx5e_nvmeotcp_queue_tx_init(struct mlx5e_nvmeotcp_queue *queue,
+			     struct mlx5_core_dev *mdev,
+			     struct net_device *netdev)
+{
+	struct sock *sk = queue->sk;
+	int err, tisn;
+
+	err = mlx5e_nvmeotcp_create_tis(mdev, &tisn);
+
+	if (err) {
+		mlx5_core_err(mdev, "create tis failed, %d\n", err);
+		return err;
+	}
+
+	queue->tisn = tisn;
+	queue->ulp_ddp_ctx.expected_seq = tcp_sk(sk)->write_seq;
+	queue->pending = true;
+	queue->end_seq_hint = 0;
+	queue->ulp_ddp_ctx.netdev = netdev;
+	queue->ulp_ddp_ctx.ddgst_len = 4;
+
+	/* following this assignment mlx5e_nvmeotcp_is_sk_tx_device_offloaded
+	 * will return true and ulp_ddp_ctx might be accessed
+	 * by the netdev's xmit function.
+	 */
+	smp_store_release(&sk->sk_validate_xmit_skb, ulp_ddp_validate_xmit_skb);
+	return err;
+}
+
 #define OCTWORD_SHIFT 4
 #define MAX_DS_VALUE 63
 static int
@@ -680,6 +710,8 @@ mlx5e_nvmeotcp_queue_init(struct net_device *netdev,
 	bool crc_rx = ((netdev->features & NETIF_F_HW_ULP_DDP) &&
 		       (config->dgst & NVME_TCP_DATA_DIGEST_ENABLE));
 	bool zerocopy = (netdev->features & NETIF_F_HW_ULP_DDP);
+	bool crc_tx = (config->dgst & NVME_TCP_DATA_DIGEST_ENABLE)  &&
+		(netdev->features & NETIF_F_HW_ULP_DDP);
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5e_nvmeotcp_queue *queue;
@@ -709,6 +741,7 @@ mlx5e_nvmeotcp_queue_init(struct net_device *netdev,
 		goto free_queue;
 	}
 
+	queue->crc_tx = crc_tx;
 	queue->crc_rx = crc_rx;
 	queue->zerocopy = zerocopy;
 	queue->ulp_ddp_ctx.type = ULP_DDP_NVME;
@@ -736,6 +769,12 @@ mlx5e_nvmeotcp_queue_init(struct net_device *netdev,
 	if (err)
 		goto destroy_rx;
 
+	if (crc_tx) {
+		err = mlx5e_nvmeotcp_queue_tx_init(queue, mdev, netdev);
+		if (err)
+			goto remove_queue_from_hash;
+	}
+
 	stats->nvmeotcp_queue_init++;
 	write_lock_bh(&sk->sk_callback_lock);
 	ulp_ddp_set_ctx(sk, queue);
@@ -743,6 +782,9 @@ mlx5e_nvmeotcp_queue_init(struct net_device *netdev,
 	refcount_set(&queue->ref_count, 1);
 	return err;
 
+remove_queue_from_hash:
+	rhashtable_remove_fast(&priv->nvmeotcp->queue_hash,
+			       &queue->hash, rhash_queues);
 destroy_rx:
 	if (zerocopy || crc_rx)
 		mlx5e_nvmeotcp_destroy_rx(queue, mdev, zerocopy);
@@ -778,6 +820,11 @@ mlx5e_nvmeotcp_queue_teardown(struct net_device *netdev,
 	rhashtable_remove_fast(&priv->nvmeotcp->queue_hash, &queue->hash,
 			       rhash_queues);
 	ida_simple_remove(&priv->nvmeotcp->queue_ids, queue->id);
+	if (queue->crc_tx) {
+		smp_store_release(&sk->sk_validate_xmit_skb, NULL);
+		mlx5e_nvmeotcp_delete_tis(priv, queue->tisn);
+	}
+
 	write_lock_bh(&sk->sk_callback_lock);
 	ulp_ddp_set_ctx(sk, NULL);
 	write_unlock_bh(&sk->sk_callback_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.h
index b9642e130b97..3bc45b81da06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.h
@@ -47,12 +47,16 @@ struct mlx5e_nvmeotcp_sq {
  *	@sk: The socket used by the NVMe-TCP queue
  *	@zerocopy: if this queue is used for zerocopy offload.
  *	@crc_rx: if this queue is used for CRC Rx offload.
+ *	@crc_tx: if this queue is used for CRC Tx offload.
  *	@ccid: ID of the current CC
  *	@ccsglidx: Index within the scatter-gather list (SGL) of the current CC
  *	@ccoff_inner: Current offset within the @ccsglidx element
  *	@priv: mlx5e netdev priv
  *	@inv_done: invalidate callback of the nvme tcp driver
  *	@after_resync_cqe: indicate if resync occurred
+ *	@tisn: Destination TIS number created for NVMEoTCP CRC TX offload
+ *	@pending: indicate if static/progress params need to be sent to the NIC.
+ *	@end_seq_hint: Tx ooo - offload packet only if it ends after the hint.
  */
 struct mlx5e_nvmeotcp_queue {
 	struct ulp_ddp_ctx		ulp_ddp_ctx;
@@ -66,7 +70,7 @@ struct mlx5e_nvmeotcp_queue {
 	u32				tag_buf_table_id;
 	struct rhash_head		hash;
 	refcount_t			ref_count;
-	bool				dgst;
+	int				dgst;
 	int				pda;
 	u32				ccid_gen;
 	u32				max_klms_per_wqe;
@@ -74,6 +78,12 @@ struct mlx5e_nvmeotcp_queue {
 	struct sock			*sk;
 	bool				zerocopy;
 	bool				crc_rx;
+	bool				crc_tx;
+	/* for crc_tx offload */
+	int				tisn;
+	bool				pending;
+	u32				end_seq_hint;
+	u32				start_pdu_hint;
 
 	/* current ccid fields */
 	off_t				ccoff;
-- 
2.24.1



