[PATCH v4 net-next 06/21] nvme-tcp: Add DDP offload control path
Or Gerlitz
gerlitz.or at gmail.com
Wed Feb 17 08:55:51 EST 2021
On Sun, Feb 14, 2021 at 8:20 PM David Ahern <dsahern at gmail.com> wrote:
> On 2/11/21 2:10 PM, Boris Pismenny wrote:
> > @@ -223,6 +229,164 @@ static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
> > return nvme_tcp_pdu_data_left(req) <= len;
> > }
> >
> > +#ifdef CONFIG_TCP_DDP
> > +
> > +static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
> > +static const struct tcp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
> > + .resync_request = nvme_tcp_resync_request,
> > +};
> > +
> > +static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
> > +{
> > + struct net_device *netdev = queue->ctrl->offloading_netdev;
> > + struct nvme_tcp_ddp_config config = {};
> > + int ret;
> > +
> > + if (!(netdev->features & NETIF_F_HW_TCP_DDP))
>
> If nvme_tcp_offload_limits does not find a dst_entry on the socket, then
> offloading_netdev may be NULL at this point and this dereference will crash.
Correct :( we will look into that; see the sketch below.
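A minimal sketch of the fix we have in mind (untested), assuming
nvme_tcp_offload_limits keeps setting ctrl->offloading_netdev to NULL
whenever no device was found:

-	if (!(netdev->features & NETIF_F_HW_TCP_DDP))
+	/* offloading_netdev is NULL when no dst_entry/device was found */
+	if (!netdev || !(netdev->features & NETIF_F_HW_TCP_DDP))
 		return -EOPNOTSUPP;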
>
> > + return -EOPNOTSUPP;
> > +
> > + config.cfg.type = TCP_DDP_NVME;
> > + config.pfv = NVME_TCP_PFV_1_0;
> > + config.cpda = 0;
> > + config.dgst = queue->hdr_digest ?
> > + NVME_TCP_HDR_DIGEST_ENABLE : 0;
> > + config.dgst |= queue->data_digest ?
> > + NVME_TCP_DATA_DIGEST_ENABLE : 0;
> > + config.queue_size = queue->queue_size;
> > + config.queue_id = nvme_tcp_queue_id(queue);
> > + config.io_cpu = queue->io_cpu;
> > +
> > + dev_hold(netdev); /* put by unoffload_socket */
> > + ret = netdev->tcp_ddp_ops->tcp_ddp_sk_add(netdev,
> > + queue->sock->sk,
> > + &config.cfg);
> > + if (ret) {
> > + dev_put(netdev);
> > + return ret;
> > + }
> > +
> > + inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = &nvme_tcp_ddp_ulp_ops;
> > + if (netdev->features & NETIF_F_HW_TCP_DDP)
> > + set_bit(NVME_TCP_Q_OFF_DDP, &queue->flags);
> > +
> > + return ret;
> > +}
> > +
> > +static void nvme_tcp_unoffload_socket(struct nvme_tcp_queue *queue)
> > +{
> > + struct net_device *netdev = queue->ctrl->offloading_netdev;
> > +
> > + if (!netdev) {
> > + dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");
>
> you are already logged in nvme_tcp_offload_limits that
> get_netdev_for_sock returned NULL; no need to do it again.
Yeah, re this one and the few other places where you commented on the
same or similar thing, I tend to agree we should follow the kernel's
trusted-programming paradigm and avoid over-checking; will discuss that
with the team. Concretely, see the sketch after the quoted function below.
> > + return;
> > + }
> > +
> > + netdev->tcp_ddp_ops->tcp_ddp_sk_del(netdev, queue->sock->sk);
> > +
> > + inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = NULL;
> > + dev_put(netdev); /* held by offload_socket */
> > +}
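For this function, going in that direction would mean something like
(untested):

 	struct net_device *netdev = queue->ctrl->offloading_netdev;

-	if (!netdev) {
-		dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");
-		return;
-	}
+	/* Only reachable with NVME_TCP_Q_OFF_DDP set, i.e. after
+	 * offload_socket() succeeded, so netdev is valid and the
+	 * reference taken there is still held.
+	 */

 	netdev->tcp_ddp_ops->tcp_ddp_sk_del(netdev, queue->sock->sk);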
> > +
> > +static int nvme_tcp_offload_limits(struct nvme_tcp_queue *queue)
> > +{
> > + struct net_device *netdev = get_netdev_for_sock(queue->sock->sk, true);
> > + struct tcp_ddp_limits limits;
> > + int ret = 0;
> > +
> > + if (!netdev) {
> > + dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");
>
> This should be more informative.
OK, will make it more informative.
> > + queue->ctrl->offloading_netdev = NULL;
> > + return -ENODEV;
> > + }
> > +
> > + if (netdev->features & NETIF_F_HW_TCP_DDP &&
> > + netdev->tcp_ddp_ops &&
> > + netdev->tcp_ddp_ops->tcp_ddp_limits)
> > + ret = netdev->tcp_ddp_ops->tcp_ddp_limits(netdev, &limits);
> > + else
> > + ret = -EOPNOTSUPP;
> > +
> > + if (!ret) {
> > + queue->ctrl->offloading_netdev = netdev;
>
> you save a reference to the netdev here, but then release the refcnt
> below. That device could be deleted between this point in time and the
> initialization of all queues.
> > + dev_dbg_ratelimited(queue->ctrl->ctrl.device,
> > + "netdev %s offload limits: max_ddp_sgl_len %d\n",
> > + netdev->name, limits.max_ddp_sgl_len);
> > + queue->ctrl->ctrl.max_segments = limits.max_ddp_sgl_len;
> > + queue->ctrl->ctrl.max_hw_sectors =
> > + limits.max_ddp_sgl_len << (ilog2(SZ_4K) - 9);
> > + } else {
> > + queue->ctrl->offloading_netdev = NULL;
> > + }
> > +
> > + /* release the device as no offload context is established yet. */
> > + dev_put(netdev);
> > +
> > + return ret;
> > +}
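Btw, in case it helps the review: the max_hw_sectors shift above just
converts 4K-sized SGL entries to 512B sectors, i.e. max_hw_sectors =
max_ddp_sgl_len * (SZ_4K / 512) = max_ddp_sgl_len * 8.

Re the reference lifetime: one direction we are considering (just a
sketch, untested) is to not cache the pointer here at all and have
nvme_tcp_offload_socket resolve and hold the device itself, so the
reference lives exactly as long as the offload context. That would also
cover your first comment, since offload_socket would no longer trust a
possibly stale cached pointer:

	static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
	{
		/* resolve and hold the device now; put by unoffload_socket */
		struct net_device *netdev =
			get_netdev_for_sock(queue->sock->sk, true);

		if (!netdev || !(netdev->features & NETIF_F_HW_TCP_DDP)) {
			if (netdev)
				dev_put(netdev);
			return -EOPNOTSUPP;
		}
		queue->ctrl->offloading_netdev = netdev;
		/* ... rest of the setup as in the patch, minus the
		 * extra dev_hold() ...
		 */
	}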
> > +
> > +static void nvme_tcp_resync_response(struct nvme_tcp_queue *queue,
> > + struct sk_buff *skb, unsigned int offset)
> > +{
> > + u64 pdu_seq = TCP_SKB_CB(skb)->seq + offset - queue->pdu_offset;
> > + struct net_device *netdev = queue->ctrl->offloading_netdev;
> > + u64 resync_val;
> > + u32 resync_seq;
> > +
> > + resync_val = atomic64_read(&queue->resync_req);
> > + /* Lower 32 bit flags. Check validity of the request */
> > + if ((resync_val & TCP_DDP_RESYNC_REQ) == 0)
> > + return;
> > +
> > + /* Obtain and check requested sequence number: is this PDU header before the request? */
> > + resync_seq = resync_val >> 32;
> > + if (before(pdu_seq, resync_seq))
> > + return;
> > +
> > + if (unlikely(!netdev)) {
> > + pr_info_ratelimited("%s: netdev not found\n", __func__);
>
> can't happen right? you get here because NVME_TCP_Q_OFF_DDP is set and
> it is only set if offloading_netdev is set and the device supports offload.
As I wrote above, we will discuss this general comment internally and will
likely go in the direction you are pointing.
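For reference while reviewing, here is a sketch of the producer side that
nvme_tcp_resync_response pairs with (assuming the queue is reachable
through sk->sk_user_data, as elsewhere in this driver); it encodes the
requested TCP sequence number in the upper 32 bits of resync_req and the
request flags in the lower 32, matching the decode above:

	static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags)
	{
		struct nvme_tcp_queue *queue = sk->sk_user_data;

		/* Upper 32 bits: requested TCP seq; lower 32 bits: flags.
		 * nvme_tcp_resync_response() decodes it the same way.
		 */
		atomic64_set(&queue->resync_req,
			     ((u64)seq << 32) | (flags & TCP_DDP_RESYNC_REQ));

		return true;
	}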