[PATCH v5 net-next 16/36] net/mlx5e: NVMEoTCP async ddp invalidation
Boris Pismenny <borisp at nvidia.com>
Thu Jul 22 04:03:05 PDT 2021
From: Ben Ben-Ishay <benishay at nvidia.com>
Tear down DDP contexts asynchronously by posting a UMR WQE that
invalidates the context's MKEY, and call back into nvme-tcp when the
corresponding CQE is received.

Signed-off-by: Boris Pismenny <borisp at mellanox.com>
Signed-off-by: Ben Ben-Ishay <benishay at nvidia.com>
Signed-off-by: Or Gerlitz <ogerlitz at mellanox.com>
Signed-off-by: Yoray Zack <yorayz at mellanox.com>
---
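Notes: a rough sketch of the asynchronous flow this patch adds, for
reviewers' convenience. It is paraphrased from the diff below; the
indented outline is illustrative only, not code from the patch:

    /* teardown request path (ulp_ddp_dev_ops .ddp_teardown) */
    mlx5e_nvmeotcp_ddp_teardown()
      -> mlx5e_nvmeotcp_post_klm_wqe(queue, KLM_UMR, ddp->command_id, 0)
         /* klm_length == 0 turns the UMR into an invalidation:
          * ucseg->mkey_mask = MLX5_MKEY_MASK_FREE;
          * mkc->status = MLX5_MKEY_STATUS_FREE;
          */

    /* completion path, once the CQE for that WQE arrives */
    mlx5e_poll_ico_cq() / mlx5e_free_icosq_descs()
      -> case MLX5E_ICOSQ_WQE_UMR_NVME_TCP_INVALIDATE:
           mlx5e_nvmeotcp_ddp_inv_done(wi)
             -> dma_unmap_sg()        /* drop the DDP SGL mapping */
             -> ulp_ops->ddp_teardown_done(ddp_ctx)
                                      /* async callback into nvme-tcp */
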
.../net/ethernet/mellanox/mlx5/core/en/txrx.h | 4 ++
.../mellanox/mlx5/core/en_accel/nvmeotcp.c | 66 ++++++++++++++++---
.../mellanox/mlx5/core/en_accel/nvmeotcp.h | 1 +
.../net/ethernet/mellanox/mlx5/core/en_rx.c | 6 ++
4 files changed, 69 insertions(+), 8 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 497c49f28d8a..f0190ee6e42c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -43,6 +43,7 @@ enum mlx5e_icosq_wqe_type {
#endif
#ifdef CONFIG_MLX5_EN_NVMEOTCP
MLX5E_ICOSQ_WQE_UMR_NVME_TCP,
+ MLX5E_ICOSQ_WQE_UMR_NVME_TCP_INVALIDATE,
MLX5E_ICOSQ_WQE_SET_PSV_NVME_TCP,
#endif
};
@@ -191,6 +192,9 @@ struct mlx5e_icosq_wqe_info {
struct {
struct mlx5e_nvmeotcp_queue *queue;
} nvmeotcp_q;
+ struct {
+ struct nvmeotcp_queue_entry *entry;
+ } nvmeotcp_qe;
#endif
};
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
index 2283b2a799f8..864b080cc8fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
@@ -152,6 +152,7 @@ enum wqe_type {
BSF_KLM_UMR = 1,
SET_PSV_UMR = 2,
BSF_UMR = 3,
+ KLM_INV_UMR = 4,
};

static void
@@ -208,6 +209,13 @@ build_nvmeotcp_klm_umr(struct mlx5e_nvmeotcp_queue *queue,
MLX5E_KLM_UMR_DS_CNT(ALIGN(klm_entries, KLM_ALIGNMENT)));
cseg->general_id = cpu_to_be32(id);

+ if (!klm_entries) { /* this is an invalidation request */
+ ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+ ucseg->flags = MLX5_UMR_INLINE;
+ mkc->status = MLX5_MKEY_STATUS_FREE;
+ return;
+ }
+
if (klm_type == KLM_UMR && !klm_offset) {
ucseg->mkey_mask |= cpu_to_be64(MLX5_MKEY_MASK_XLT_OCT_SIZE |
MLX5_MKEY_MASK_LEN | MLX5_MKEY_MASK_FREE);
@@ -309,8 +317,8 @@ build_nvmeotcp_static_params(struct mlx5e_nvmeotcp_queue *queue,
static void
mlx5e_nvmeotcp_fill_wi(struct mlx5e_nvmeotcp_queue *nvmeotcp_queue,
- struct mlx5e_icosq *sq, u32 wqe_bbs, u16 pi,
- enum wqe_type type)
+ struct mlx5e_icosq *sq, u32 wqe_bbs,
+ u16 pi, u16 ccid, enum wqe_type type)
{
struct mlx5e_icosq_wqe_info *wi = &sq->db.wqe_info[pi];
@@ -319,12 +327,17 @@ mlx5e_nvmeotcp_fill_wi(struct mlx5e_nvmeotcp_queue *nvmeotcp_queue,
case SET_PSV_UMR:
wi->wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_NVME_TCP;
break;
+ case KLM_INV_UMR:
+ wi->wqe_type = MLX5E_ICOSQ_WQE_UMR_NVME_TCP_INVALIDATE;
+ break;
default:
wi->wqe_type = MLX5E_ICOSQ_WQE_UMR_NVME_TCP;
break;
}
- if (type == SET_PSV_UMR)
+ if (type == KLM_INV_UMR)
+ wi->nvmeotcp_qe.entry = &nvmeotcp_queue->ccid_table[ccid];
+ else if (type == SET_PSV_UMR)
wi->nvmeotcp_q.queue = nvmeotcp_queue;
}
@@ -340,7 +353,7 @@ mlx5e_nvmeotcp_rx_post_static_params_wqe(struct mlx5e_nvmeotcp_queue *queue,
wqe_bbs = MLX5E_NVMEOTCP_STATIC_PARAMS_WQEBBS;
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
wqe = MLX5E_NVMEOTCP_FETCH_STATIC_PARAMS_WQE(sq, pi);
- mlx5e_nvmeotcp_fill_wi(NULL, sq, wqe_bbs, pi, BSF_UMR);
+ mlx5e_nvmeotcp_fill_wi(NULL, sq, wqe_bbs, pi, 0, BSF_UMR);
build_nvmeotcp_static_params(queue, wqe, resync_seq, queue->zerocopy, queue->crc_rx);
sq->pc += wqe_bbs;
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
@@ -358,7 +371,7 @@ mlx5e_nvmeotcp_rx_post_progress_params_wqe(struct mlx5e_nvmeotcp_queue *queue,
wqe_bbs = MLX5E_NVMEOTCP_PROGRESS_PARAMS_WQEBBS;
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
wqe = MLX5E_NVMEOTCP_FETCH_PROGRESS_PARAMS_WQE(sq, pi);
- mlx5e_nvmeotcp_fill_wi(queue, sq, wqe_bbs, pi, SET_PSV_UMR);
+ mlx5e_nvmeotcp_fill_wi(queue, sq, wqe_bbs, pi, 0, SET_PSV_UMR);
build_nvmeotcp_progress_params(queue, wqe, seq);
sq->pc += wqe_bbs;
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
@@ -382,7 +395,8 @@ post_klm_wqe(struct mlx5e_nvmeotcp_queue *queue,
wqe_bbs = DIV_ROUND_UP(wqe_sz, MLX5_SEND_WQE_BB);
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
wqe = MLX5E_NVMEOTCP_FETCH_KLM_WQE(sq, pi);
- mlx5e_nvmeotcp_fill_wi(queue, sq, wqe_bbs, pi, wqe_type);
+ mlx5e_nvmeotcp_fill_wi(queue, sq, wqe_bbs, pi, ccid,
+ klm_length ? KLM_UMR : KLM_INV_UMR);
build_nvmeotcp_klm_umr(queue, wqe, ccid, cur_klm_entries, *klm_offset,
klm_length, wqe_type);
*klm_offset += cur_klm_entries;
@@ -400,8 +414,13 @@ mlx5e_nvmeotcp_post_klm_wqe(struct mlx5e_nvmeotcp_queue *queue,
struct mlx5e_icosq *sq = &queue->sq->icosq;

/* TODO: set stricter wqe_sz; using max for now */
- wqes = DIV_ROUND_UP(klm_length, queue->max_klms_per_wqe);
- wqe_sz = MLX5E_KLM_UMR_WQE_SZ(queue->max_klms_per_wqe);
+ if (klm_length == 0) {
+ wqes = 1;
+ wqe_sz = MLX5E_NVMEOTCP_STATIC_PARAMS_WQEBBS;
+ } else {
+ wqes = DIV_ROUND_UP(klm_length, queue->max_klms_per_wqe);
+ wqe_sz = MLX5E_KLM_UMR_WQE_SZ(queue->max_klms_per_wqe);
+ }

max_wqe_bbs = DIV_ROUND_UP(wqe_sz, MLX5_SEND_WQE_BB);
@@ -746,6 +765,24 @@ mlx5e_nvmeotcp_ddp_setup(struct net_device *netdev,
return 0;
}

+void mlx5e_nvmeotcp_ddp_inv_done(struct mlx5e_icosq_wqe_info *wi)
+{
+ struct nvmeotcp_queue_entry *q_entry = wi->nvmeotcp_qe.entry;
+ struct mlx5e_nvmeotcp_queue *queue = q_entry->queue;
+ struct mlx5_core_dev *mdev = queue->priv->mdev;
+ struct ulp_ddp_io *ddp = q_entry->ddp;
+ const struct ulp_ddp_ulp_ops *ulp_ops;
+
+ dma_unmap_sg(mdev->device, ddp->sg_table.sgl,
+ q_entry->sgl_length, DMA_FROM_DEVICE);
+
+ q_entry->sgl_length = 0;
+
+ ulp_ops = inet_csk(queue->sk)->icsk_ulp_ddp_ops;
+ if (ulp_ops && ulp_ops->ddp_teardown_done)
+ ulp_ops->ddp_teardown_done(q_entry->ddp_ctx);
+}
+
void mlx5e_nvmeotcp_ctx_comp(struct mlx5e_icosq_wqe_info *wi)
{
struct mlx5e_nvmeotcp_queue *queue = wi->nvmeotcp_q.queue;
@@ -762,6 +799,19 @@ mlx5e_nvmeotcp_ddp_teardown(struct net_device *netdev,
struct sock *sk,
struct ulp_ddp_io *ddp,
void *ddp_ctx)
{
+ struct mlx5e_nvmeotcp_queue *queue;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct nvmeotcp_queue_entry *q_entry;
+
+ queue = container_of(ulp_ddp_get_ctx(sk), struct mlx5e_nvmeotcp_queue, ulp_ddp_ctx);
+ q_entry = &queue->ccid_table[ddp->command_id];
+ WARN_ON(q_entry->sgl_length == 0);
+
+ q_entry->ddp_ctx = ddp_ctx;
+ q_entry->queue = queue;
+
+ mlx5e_nvmeotcp_post_klm_wqe(queue, KLM_UMR, ddp->command_id, 0);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.h
index 20141010817d..b9642e130b97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.h
@@ -105,6 +105,7 @@ void mlx5e_nvmeotcp_cleanup(struct mlx5e_priv *priv);
struct mlx5e_nvmeotcp_queue *
mlx5e_nvmeotcp_get_queue(struct mlx5e_nvmeotcp *nvmeotcp, int id);
void mlx5e_nvmeotcp_put_queue(struct mlx5e_nvmeotcp_queue *queue);
+void mlx5e_nvmeotcp_ddp_inv_done(struct mlx5e_icosq_wqe_info *wi);
void mlx5e_nvmeotcp_ctx_comp(struct mlx5e_icosq_wqe_info *wi);
int mlx5e_nvmeotcp_init_rx(struct mlx5e_priv *priv);
void mlx5e_nvmeotcp_cleanup_rx(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index bc7b19974ed9..e92dd4666955 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -628,6 +628,9 @@ void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
#ifdef CONFIG_MLX5_EN_NVMEOTCP
case MLX5E_ICOSQ_WQE_UMR_NVME_TCP:
break;
+ case MLX5E_ICOSQ_WQE_UMR_NVME_TCP_INVALIDATE:
+ mlx5e_nvmeotcp_ddp_inv_done(wi);
+ break;
case MLX5E_ICOSQ_WQE_SET_PSV_NVME_TCP:
mlx5e_nvmeotcp_ctx_comp(wi);
break;
@@ -706,6 +709,9 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
#ifdef CONFIG_MLX5_EN_NVMEOTCP
case MLX5E_ICOSQ_WQE_UMR_NVME_TCP:
break;
+ case MLX5E_ICOSQ_WQE_UMR_NVME_TCP_INVALIDATE:
+ mlx5e_nvmeotcp_ddp_inv_done(wi);
+ break;
case MLX5E_ICOSQ_WQE_SET_PSV_NVME_TCP:
mlx5e_nvmeotcp_ctx_comp(wi);
break;
--
2.24.1