[PATCH 4/5] nvmet-rdma: add a NVMe over Fabrics RDMA target driver
Christoph Hellwig
hch at infradead.org
Tue Jun 14 07:32:48 PDT 2016
On Thu, Jun 09, 2016 at 04:42:11PM -0500, Steve Wise wrote:
>
> <snip>
>
> > > +
> > > +static struct nvmet_rdma_queue *
> > > +nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
> > > + struct rdma_cm_id *cm_id,
> > > + struct rdma_cm_event *event)
> > > +{
> > > + struct nvmet_rdma_queue *queue;
> > > + int ret;
> > > +
> > > + queue = kzalloc(sizeof(*queue), GFP_KERNEL);
> > > + if (!queue) {
> > > + ret = NVME_RDMA_CM_NO_RSC;
> > > + goto out_reject;
> > > + }
> > > +
> > > + ret = nvmet_sq_init(&queue->nvme_sq);
> > > + if (ret)
> > > + goto out_free_queue;
> > > +
> > > + ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
> > > + if (ret)
> > > + goto out_destroy_sq;
> > > +
> > > + /*
> > > + * Schedules the actual release because calling rdma_destroy_id from
> > > + * inside a CM callback would trigger a deadlock. (great API design..)
> > > + */
> > > + INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
> > > + queue->dev = ndev;
> > > + queue->cm_id = cm_id;
> > > +
> > > + spin_lock_init(&queue->state_lock);
> > > + queue->state = NVMET_RDMA_Q_CONNECTING;
> > > + INIT_LIST_HEAD(&queue->rsp_wait_list);
> > > + INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
> > > + spin_lock_init(&queue->rsp_wr_wait_lock);
> > > + INIT_LIST_HEAD(&queue->free_rsps);
> > > + spin_lock_init(&queue->rsps_lock);
> > > +
> > > + queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
> > > + if (queue->idx < 0) {
> > > + ret = NVME_RDMA_CM_NO_RSC;
> > > + goto out_free_queue;
> > > + }
> > > +
> > > + ret = nvmet_rdma_alloc_rsps(queue);
> > > + if (ret) {
> > > + ret = NVME_RDMA_CM_NO_RSC;
> > > + goto out_ida_remove;
> > > + }
> > > +
> > > + if (!ndev->srq) {
> > > + queue->cmds = nvmet_rdma_alloc_cmds(ndev,
> > > + queue->recv_queue_size,
> > > + !queue->host_qid);
> > > + if (IS_ERR(queue->cmds)) {
> > > + ret = NVME_RDMA_CM_NO_RSC;
> > > + goto out_free_cmds;
> > > + }
> > > + }
> > > +
>
> Should the above error path actually goto a block that frees the rsps? Like
> this?
Yes, this looks good. Thanks a lot, I'll include it when reposting.
More information about the Linux-nvme
mailing list