[PATCH v11 09/25] nvme-tcp: RX DDGST offload
Aurelien Aptel
aaptel at nvidia.com
Fri Feb 3 05:26:49 PST 2023
From: Yoray Zack <yorayz at nvidia.com>
Enable the RX side of DDGST offload when supported.
At the end of the capsule, check whether the HW-verified digest bit is
set on every skb that carried it; if not, recalculate the DDGST in SW
and verify it.
Signed-off-by: Yoray Zack <yorayz at nvidia.com>
Signed-off-by: Boris Pismenny <borisp at nvidia.com>
Signed-off-by: Ben Ben-Ishay <benishay at nvidia.com>
Signed-off-by: Or Gerlitz <ogerlitz at nvidia.com>
Signed-off-by: Shai Malin <smalin at nvidia.com>
Signed-off-by: Aurelien Aptel <aaptel at nvidia.com>
Reviewed-by: Chaitanya Kulkarni <kch at nvidia.com>
---
drivers/nvme/host/tcp.c | 142 ++++++++++++++++++++++++++++++++++++----
1 file changed, 128 insertions(+), 14 deletions(-)
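
For reference, the verification flow added to nvme_tcp_recv_ddgst() below
reduces to roughly the following (a simplified sketch, not the literal diff;
the helper names are the ones introduced by this patch and earlier patches
in the series):

	/* reset at the start of each capsule, in nvme_tcp_init_recv_ctx() */
	queue->ddp_ddgst_valid = true;

	/* while DATA/DDGST bytes arrive, AND in the per-skb HW result */
	if (test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags))
		nvme_tcp_ddp_ddgst_update(queue, skb); /* &= skb_is_ulp_crc(skb) */

	/* once the full DDGST field has been received */
	if (test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags)) {
		if (nvme_tcp_ddp_ddgst_ok(queue)) {
			/* HW verified every skb of the capsule: skip the SW check */
		} else {
			/* SW fallback: map the request SGL if DDP did not, and
			 * recompute the expected digest with the ahash
			 */
			nvme_tcp_ddp_ddgst_recalc(queue->rcv_hash, rq,
						  &queue->exp_ddgst);
			if (queue->recv_ddgst != queue->exp_ddgst)
				req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
		}
	}
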
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 533177971777..7e3feb694e46 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -116,6 +116,7 @@ enum nvme_tcp_queue_flags {
NVME_TCP_Q_LIVE = 1,
NVME_TCP_Q_POLLING = 2,
NVME_TCP_Q_OFF_DDP = 3,
+ NVME_TCP_Q_OFF_DDGST_RX = 4,
};
enum nvme_tcp_recv_state {
@@ -143,6 +144,9 @@ struct nvme_tcp_queue {
size_t ddgst_remaining;
unsigned int nr_cqe;
+#ifdef CONFIG_ULP_DDP
+ bool ddp_ddgst_valid;
+
/*
* resync_req is a speculative PDU header tcp seq number (with
* an additional flag at 32 lower bits) that the HW send to
@@ -152,6 +156,7 @@ struct nvme_tcp_queue {
* is pending (ULP_DDP_RESYNC_PENDING).
*/
atomic64_t resync_req;
+#endif
/* send state */
struct nvme_tcp_request *request;
@@ -288,9 +293,22 @@ static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
#ifdef CONFIG_ULP_DDP
-static inline bool is_netdev_ulp_offload_active(struct net_device *netdev)
+static inline bool is_netdev_ulp_offload_active(struct net_device *netdev,
+ struct nvme_tcp_queue *queue)
{
- return test_bit(ULP_DDP_C_NVME_TCP_BIT, netdev->ulp_ddp_caps.active);
+ bool ddgst_offload;
+
+ if (test_bit(ULP_DDP_C_NVME_TCP_BIT, netdev->ulp_ddp_caps.active))
+ return true;
+
+ ddgst_offload = test_bit(ULP_DDP_C_NVME_TCP_DDGST_RX_BIT,
+ netdev->ulp_ddp_caps.active);
+ if (!queue && ddgst_offload)
+ return true;
+ if (queue && queue->data_digest && ddgst_offload)
+ return true;
+
+ return false;
}
static bool nvme_tcp_ddp_query_limits(struct net_device *netdev,
@@ -298,7 +316,7 @@ static bool nvme_tcp_ddp_query_limits(struct net_device *netdev,
{
int ret;
- if (!netdev || !is_netdev_ulp_offload_active(netdev) ||
+ if (!netdev || !is_netdev_ulp_offload_active(netdev, NULL) ||
!netdev->netdev_ops->ulp_ddp_ops->limits)
return false;
@@ -314,6 +332,18 @@ static bool nvme_tcp_ddp_query_limits(struct net_device *netdev,
return true;
}
+static inline bool nvme_tcp_ddp_ddgst_ok(struct nvme_tcp_queue *queue)
+{
+ return queue->ddp_ddgst_valid;
+}
+
+static inline void nvme_tcp_ddp_ddgst_update(struct nvme_tcp_queue *queue,
+ struct sk_buff *skb)
+{
+ if (queue->ddp_ddgst_valid)
+ queue->ddp_ddgst_valid = skb_is_ulp_crc(skb);
+}
+
static int nvme_tcp_req_map_sg(struct nvme_tcp_request *req, struct request *rq)
{
int ret;
@@ -328,6 +358,38 @@ static int nvme_tcp_req_map_sg(struct nvme_tcp_request *req, struct request *rq)
return 0;
}
+static void nvme_tcp_ddp_ddgst_recalc(struct ahash_request *hash,
+ struct request *rq,
+ __le32 *ddgst)
+{
+ struct nvme_tcp_request *req;
+
+ if (!rq)
+ return;
+
+ req = blk_mq_rq_to_pdu(rq);
+
+ if (!req->offloaded) {
+ /* if we have DDGST_RX offload without DDP the request
+ * wasn't mapped, so we need to map it here
+ */
+ if (nvme_tcp_req_map_sg(req, rq))
+ return;
+ }
+
+ req->ddp.sg_table.sgl = req->ddp.first_sgl;
+ ahash_request_set_crypt(hash, req->ddp.sg_table.sgl, (u8 *)ddgst,
+ req->data_len);
+ crypto_ahash_digest(hash);
+
+ if (!req->offloaded) {
+ /* without DDP, ddp_teardown() won't be called, so
+ * free the table here
+ */
+ sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
+ }
+}
+
static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
static void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
static const struct ulp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
@@ -387,6 +449,10 @@ static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
{
struct net_device *netdev = queue->ctrl->offloading_netdev;
struct ulp_ddp_config config = {.type = ULP_DDP_NVME};
+ bool offload_ddp = test_bit(ULP_DDP_C_NVME_TCP_BIT,
+ netdev->ulp_ddp_caps.active);
+ bool offload_ddgst_rx = test_bit(ULP_DDP_C_NVME_TCP_DDGST_RX_BIT,
+ netdev->ulp_ddp_caps.active);
int ret;
config.nvmeotcp.pfv = NVME_TCP_PFV_1_0;
@@ -413,7 +479,10 @@ static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
}
inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = &nvme_tcp_ddp_ulp_ops;
- set_bit(NVME_TCP_Q_OFF_DDP, &queue->flags);
+ if (offload_ddp)
+ set_bit(NVME_TCP_Q_OFF_DDP, &queue->flags);
+ if (queue->data_digest && offload_ddgst_rx)
+ set_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags);
return 0;
}
@@ -427,6 +496,7 @@ static void nvme_tcp_unoffload_socket(struct nvme_tcp_queue *queue)
}
clear_bit(NVME_TCP_Q_OFF_DDP, &queue->flags);
+ clear_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags);
netdev->netdev_ops->ulp_ddp_ops->sk_del(netdev, queue->sock->sk);
@@ -519,11 +589,26 @@ static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags)
#else
-static inline bool is_netdev_ulp_offload_active(struct net_device *netdev)
+static inline bool is_netdev_ulp_offload_active(struct net_device *netdev,
+ struct nvme_tcp_queue *queue)
{
return false;
}
+static inline bool nvme_tcp_ddp_ddgst_ok(struct nvme_tcp_queue *queue)
+{
+ return true;
+}
+
+static inline void nvme_tcp_ddp_ddgst_update(struct nvme_tcp_queue *queue,
+ struct sk_buff *skb)
+{}
+
+static void nvme_tcp_ddp_ddgst_recalc(struct ahash_request *hash,
+ struct request *rq,
+ __le32 *ddgst)
+{}
+
static int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue, u16 command_id,
struct request *rq)
{
@@ -806,6 +891,9 @@ static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
queue->pdu_offset = 0;
queue->data_remaining = -1;
queue->ddgst_remaining = 0;
+#ifdef CONFIG_ULP_DDP
+ queue->ddp_ddgst_valid = true;
+#endif
}
static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
@@ -1009,7 +1097,8 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
int ret;
- if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags))
+ if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags) ||
+ test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags))
nvme_tcp_resync_response(queue, skb, *offset);
ret = skb_copy_bits(skb, *offset,
@@ -1073,6 +1162,10 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ if (queue->data_digest &&
+ test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags))
+ nvme_tcp_ddp_ddgst_update(queue, skb);
+
while (true) {
int recv_len, ret;
@@ -1101,7 +1194,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
recv_len = min_t(size_t, recv_len,
iov_iter_count(&req->iter));
- if (queue->data_digest)
+ if (queue->data_digest &&
+ !test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags))
ret = skb_copy_and_hash_datagram_iter(skb, *offset,
&req->iter, recv_len, queue->rcv_hash);
else
@@ -1143,8 +1237,11 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
char *ddgst = (char *)&queue->recv_ddgst;
size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
+ struct request *rq;
int ret;
+ if (test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags))
+ nvme_tcp_ddp_ddgst_update(queue, skb);
ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
if (unlikely(ret))
return ret;
@@ -1155,9 +1252,25 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
if (queue->ddgst_remaining)
return 0;
+ rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+ pdu->command_id);
+
+ if (test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags)) {
+ /*
+ * If HW successfully offloaded the digest
+ * verification, we can skip it
+ */
+ if (nvme_tcp_ddp_ddgst_ok(queue))
+ goto out;
+ /*
+ * Otherwise we have to recalculate and verify the
+ * digest with the software-fallback
+ */
+ nvme_tcp_ddp_ddgst_recalc(queue->rcv_hash, rq,
+ &queue->exp_ddgst);
+ }
+
if (queue->recv_ddgst != queue->exp_ddgst) {
- struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
- pdu->command_id);
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
@@ -1168,9 +1281,8 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
le32_to_cpu(queue->exp_ddgst));
}
+out:
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
- struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
- pdu->command_id);
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
nvme_tcp_end_request(rq, le16_to_cpu(req->status));
@@ -1981,7 +2093,8 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
nvme_tcp_restore_sock_calls(queue);
cancel_work_sync(&queue->io_work);
- if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags))
+ if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags) ||
+ test_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags))
nvme_tcp_unoffload_socket(queue);
}
@@ -2011,7 +2124,8 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
goto err;
netdev = ctrl->queues[idx].ctrl->offloading_netdev;
- if (netdev && is_netdev_ulp_offload_active(netdev)) {
+ if (netdev &&
+ is_netdev_ulp_offload_active(netdev, &ctrl->queues[idx])) {
ret = nvme_tcp_offload_socket(&ctrl->queues[idx]);
if (ret) {
dev_err(nctrl->device,
@@ -2030,7 +2144,7 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
ctrl->offloading_netdev = NULL;
goto done;
}
- if (is_netdev_ulp_offload_active(netdev))
+ if (is_netdev_ulp_offload_active(netdev, &ctrl->queues[idx]))
nvme_tcp_offload_limits(&ctrl->queues[idx], netdev);
/*
* release the device as no offload context is
--
2.31.1