[PATCH v5 net-next 32/36] net/mlx5e: NVMEoTCP DDGST TX BSF and PSV
Boris Pismenny
borisp@nvidia.com
Thu Jul 22 04:03:21 PDT 2021
From: Yoray Zack <yorayz@nvidia.com>
Change the functions that build the NVMEoTCP progress params and static
params WQEs to work for both Tx and Rx.
Signed-off-by: Yoray Zack <yorayz@nvidia.com>
---
.../mellanox/mlx5/core/en_accel/nvmeotcp.c | 130 ++++++++++++++----
.../mlx5/core/en_accel/nvmeotcp_utils.h | 4 +-
2 files changed, 108 insertions(+), 26 deletions(-)
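
For reference, a minimal sketch of the call-site difference introduced by
this patch (arguments as they appear in the hunks below; queue/SQ setup and
WQE fetching are elided):

	/* Rx path: params posted on the ICOSQ, is_rx = true */
	build_nvmeotcp_static_params(queue, wqe, resync_seq, true, sq->pc,
				     sq->sqn, queue->zerocopy, queue->crc_rx);
	build_nvmeotcp_progress_params(queue, wqe, seq, true, false,
				       sq->pc, sq->sqn);

	/* Tx path: params posted on the TXQSQ, is_rx = false,
	 * DDGST offload enabled, no resync
	 */
	build_nvmeotcp_static_params(queue, wqe, 0, false, sq->pc, sq->sqn,
				     false, true);
	build_nvmeotcp_progress_params(queue, wqe, seq, false, false,
				       sq->pc, sq->sqn);
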
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
index 6023e1ae7be4..624d8a28dc21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
@@ -155,8 +155,11 @@ static int mlx5e_nvmeotcp_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
}
#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_TIR_PARAMS 0x2
+#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_TIS_PARAMS 0x1
#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIR_STATIC_PARAMS 0x2
#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_UMR 0x0
+#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIS_STATIC_PARAMS 0x1
+#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIS_PROGRESS_PARAMS 0x3
#define STATIC_PARAMS_DS_CNT \
DIV_ROUND_UP(MLX5E_NVMEOTCP_STATIC_PARAMS_WQE_SZ, MLX5_SEND_WQE_DS)
@@ -250,56 +253,75 @@ build_nvmeotcp_klm_umr(struct mlx5e_nvmeotcp_queue *queue,
static void
fill_nvmeotcp_progress_params(struct mlx5e_nvmeotcp_queue *queue,
struct mlx5_seg_nvmeotcp_progress_params *params,
- u32 seq)
+ u32 seq, bool is_tx)
{
void *ctx = params->ctx;
- params->tir_num = cpu_to_be32(queue->tirn);
+ params->tir_num = is_tx ? cpu_to_be32(queue->tisn) : cpu_to_be32(queue->tirn);
MLX5_SET(nvmeotcp_progress_params, ctx,
next_pdu_tcp_sn, seq);
MLX5_SET(nvmeotcp_progress_params, ctx, pdu_tracker_state,
MLX5E_NVMEOTCP_PROGRESS_PARAMS_PDU_TRACKER_STATE_START);
+ if (is_tx)
+ MLX5_SET(nvmeotcp_progress_params, ctx, offloading_state, 0);
+}
+
+static void nvme_tx_fill_wi(struct mlx5e_txqsq *sq,
+ u16 pi, u8 num_wqebbs, u32 num_bytes,
+ struct page *page, enum mlx5e_dump_wqe_type type)
+{
+ struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
+
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_bytes = num_bytes,
+ };
}
void
build_nvmeotcp_progress_params(struct mlx5e_nvmeotcp_queue *queue,
struct mlx5e_set_nvmeotcp_progress_params_wqe *wqe,
- u32 seq)
+ u32 seq, bool is_rx, bool resync, u16 pc, u32 sqn)
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- u32 sqn = queue->sq->icosq.sqn;
- u16 pc = queue->sq->icosq.pc;
- u8 opc_mod;
+ u8 opc_mod = is_rx ?
+ MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIR_PROGRESS_PARAMS :
+ MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIS_PROGRESS_PARAMS;
memset(wqe, 0, MLX5E_NVMEOTCP_PROGRESS_PARAMS_WQE_SZ);
- opc_mod = MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIR_PROGRESS_PARAMS;
+
cseg->opmod_idx_opcode = cpu_to_be32((pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_SET_PSV | (opc_mod << 24));
cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
PROGRESS_PARAMS_DS_CNT);
- fill_nvmeotcp_progress_params(queue, &wqe->params, seq);
+ fill_nvmeotcp_progress_params(queue, &wqe->params, seq, !is_rx);
}
static void
fill_nvmeotcp_static_params(struct mlx5e_nvmeotcp_queue *queue,
struct mlx5_seg_nvmeotcp_static_params *params,
- u32 resync_seq, bool zero_copy_en,
+ u32 resync_seq, bool is_rx, bool zero_copy_en,
bool ddgst_offload_en)
{
void *ctx = params->ctx;
+ int pda = queue->pda;
+ bool hddgst_en = queue->dgst & NVME_TCP_HDR_DIGEST_ENABLE;
+ bool ddgst_en = queue->dgst & NVME_TCP_DATA_DIGEST_ENABLE;
+
+ if (!is_rx) {
+ pda = 0;
+ }
MLX5_SET(transport_static_params, ctx, const_1, 1);
MLX5_SET(transport_static_params, ctx, const_2, 2);
MLX5_SET(transport_static_params, ctx, acc_type,
MLX5_TRANSPORT_STATIC_PARAMS_ACC_TYPE_NVMETCP);
MLX5_SET(transport_static_params, ctx, nvme_resync_tcp_sn, resync_seq);
- MLX5_SET(transport_static_params, ctx, pda, queue->pda);
- MLX5_SET(transport_static_params, ctx, ddgst_en,
- queue->dgst & NVME_TCP_DATA_DIGEST_ENABLE);
+ MLX5_SET(transport_static_params, ctx, pda, pda);
+ MLX5_SET(transport_static_params, ctx, ddgst_en, ddgst_en);
MLX5_SET(transport_static_params, ctx, ddgst_offload_en, ddgst_offload_en);
- MLX5_SET(transport_static_params, ctx, hddgst_en,
- queue->dgst & NVME_TCP_HDR_DIGEST_ENABLE);
+ MLX5_SET(transport_static_params, ctx, hddgst_en, hddgst_en);
MLX5_SET(transport_static_params, ctx, hdgst_offload_en, 0);
MLX5_SET(transport_static_params, ctx, ti,
MLX5_TRANSPORT_STATIC_PARAMS_TI_INITIATOR);
@@ -310,26 +332,31 @@ fill_nvmeotcp_static_params(struct mlx5e_nvmeotcp_queue *queue,
void
build_nvmeotcp_static_params(struct mlx5e_nvmeotcp_queue *queue,
struct mlx5e_set_nvmeotcp_static_params_wqe *wqe,
- u32 resync_seq, bool zerocopy, bool crc_rx)
+ u32 resync_seq, bool is_rx, u16 pc, u32 sqn,
+ bool zerocopy, bool crc_rx)
{
- u8 opc_mod = MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIR_STATIC_PARAMS;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- u32 sqn = queue->sq->icosq.sqn;
- u16 pc = queue->sq->icosq.pc;
+ int tirn_tisn = is_rx ? queue->tirn : queue->tisn;
+ u8 opc_mod = is_rx ?
+ MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIR_STATIC_PARAMS :
+ MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIS_STATIC_PARAMS;
+
memset(wqe, 0, MLX5E_NVMEOTCP_STATIC_PARAMS_WQE_SZ);
- cseg->opmod_idx_opcode = cpu_to_be32((pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
- MLX5_OPCODE_UMR | (opc_mod) << 24);
+ cseg->opmod_idx_opcode = cpu_to_be32((pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_UMR | (opc_mod) << 24);
cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
STATIC_PARAMS_DS_CNT);
- cseg->imm = cpu_to_be32(queue->tirn << MLX5_WQE_CTRL_TIR_TIS_INDEX_SHIFT);
+ cseg->imm = cpu_to_be32(tirn_tisn <<
+ MLX5_WQE_CTRL_TIR_TIS_INDEX_SHIFT);
ucseg->flags = MLX5_UMR_INLINE;
ucseg->bsf_octowords =
cpu_to_be16(MLX5E_NVMEOTCP_STATIC_PARAMS_OCTWORD_SIZE);
- fill_nvmeotcp_static_params(queue, &wqe->params, resync_seq, zerocopy, crc_rx);
+ fill_nvmeotcp_static_params(queue, &wqe->params, resync_seq,
+ is_rx, zerocopy, crc_rx);
}
static void
@@ -371,7 +398,8 @@ mlx5e_nvmeotcp_rx_post_static_params_wqe(struct mlx5e_nvmeotcp_queue *queue,
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
wqe = MLX5E_NVMEOTCP_FETCH_STATIC_PARAMS_WQE(sq, pi);
mlx5e_nvmeotcp_fill_wi(NULL, sq, wqe_bbs, pi, 0, BSF_UMR);
- build_nvmeotcp_static_params(queue, wqe, resync_seq, queue->zerocopy, queue->crc_rx);
+ build_nvmeotcp_static_params(queue, wqe, resync_seq, true, sq->pc,
+ sq->sqn, queue->zerocopy, queue->crc_rx);
sq->pc += wqe_bbs;
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
spin_unlock(&queue->nvmeotcp_icosq_lock);
@@ -389,7 +417,7 @@ mlx5e_nvmeotcp_rx_post_progress_params_wqe(struct mlx5e_nvmeotcp_queue *queue,
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
wqe = MLX5E_NVMEOTCP_FETCH_PROGRESS_PARAMS_WQE(sq, pi);
mlx5e_nvmeotcp_fill_wi(queue, sq, wqe_bbs, pi, 0, SET_PSV_UMR);
- build_nvmeotcp_progress_params(queue, wqe, seq);
+ build_nvmeotcp_progress_params(queue, wqe, seq, true, false, sq->pc, sq->sqn);
sq->pc += wqe_bbs;
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
}
@@ -1078,6 +1106,60 @@ int mlx5e_nvmeotcp_init(struct mlx5e_priv *priv)
return ret;
}
+static
+void mlx5e_nvmeotcp_tx_post_static_params(struct mlx5e_nvmeotcp_queue *queue,
+ struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_set_nvmeotcp_static_params_wqe *wqe;
+ enum mlx5e_dump_wqe_type type = MLX5E_DUMP_WQE_NVMEOTCP;
+ u16 pi, wqe_bbs;
+
+ wqe_bbs = MLX5E_NVMEOTCP_STATIC_PARAMS_WQEBBS;
+ pi = mlx5e_txqsq_get_next_pi(sq, wqe_bbs);
+ wqe = MLX5E_NVMEOTCP_FETCH_STATIC_PARAMS_WQE(sq, pi);
+ nvme_tx_fill_wi(sq, pi, wqe_bbs, 0, NULL, type);
+ build_nvmeotcp_static_params(queue, wqe, 0, false,
+ sq->pc, sq->sqn, false, true);
+ sq->pc += wqe_bbs;
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
+}
+
+static
+void mlx5e_nvmeotcp_tx_post_progress_params(struct mlx5e_nvmeotcp_queue *queue,
+ struct mlx5e_txqsq *sq, u32 seq,
+ bool resync)
+{
+ struct mlx5e_set_nvmeotcp_progress_params_wqe *wqe;
+ enum mlx5e_dump_wqe_type type = MLX5E_DUMP_WQE_NVMEOTCP;
+ u16 pi, wqe_bbs;
+
+ wqe_bbs = MLX5E_NVMEOTCP_PROGRESS_PARAMS_WQEBBS;
+ pi = mlx5e_txqsq_get_next_pi(sq, wqe_bbs);
+ wqe = MLX5E_NVMEOTCP_FETCH_PROGRESS_PARAMS_WQE(sq, pi);
+ nvme_tx_fill_wi(sq, pi, wqe_bbs, 0, NULL, type);
+ build_nvmeotcp_progress_params(queue, wqe, seq, false, resync, sq->pc, sq->sqn);
+ sq->pc += wqe_bbs;
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
+}
+
+static
+bool mlx5e_nvmeotcp_test_and_clear_pending(struct mlx5e_nvmeotcp_queue *ctx)
+{
+ bool ret = ctx->pending;
+
+ ctx->pending = false;
+
+ return ret;
+}
+
+static
+void mlx5e_nvmeotcp_tx_post_param_wqes(struct mlx5e_txqsq *sq, struct sock *sk,
+ struct mlx5e_nvmeotcp_queue *ctx)
+{
+ mlx5e_nvmeotcp_tx_post_static_params(ctx, sq);
+ mlx5e_nvmeotcp_tx_post_progress_params(ctx, sq, tcp_sk(sk)->copied_seq, false);
+}
+
void mlx5e_nvmeotcp_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_nvmeotcp *nvmeotcp = priv->nvmeotcp;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_utils.h
index 44671e28a9ea..e7436aa01ad4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_utils.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_utils.h
@@ -69,12 +69,12 @@ struct mlx5e_get_psv_wqe {
void
build_nvmeotcp_progress_params(struct mlx5e_nvmeotcp_queue *queue,
struct mlx5e_set_nvmeotcp_progress_params_wqe *wqe,
- u32 seq);
+ u32 seq, bool is_rx, bool is_resync, u16 pc, u32 sqn);
void
build_nvmeotcp_static_params(struct mlx5e_nvmeotcp_queue *queue,
struct mlx5e_set_nvmeotcp_static_params_wqe *wqe,
- u32 resync_seq,
+ u32 resync_seq, bool is_rx, u16 pc, u32 sqn,
bool zerocopy, bool crc_rx);
#endif /* __MLX5E_NVMEOTCP_UTILS_H__ */
--
2.24.1