[bug report] nvmet-fc: track hostport handle for associations
Dan Carpenter
dan.carpenter at linaro.org
Tue Feb 6 05:09:38 PST 2024
Hello James Smart,
This warning is quite old but I don't think I've ever reported it
before.
The patch 58ab8ff9dca2: "nvmet-fc: track hostport handle for
associations" from Mar 31, 2020 (linux-next), leads to the following
Smatch static checker warning:
drivers/nvme/target/fc.c:950 nvmet_fc_delete_target_queue()
warn: sleeping in atomic context
The call tree is this:
nvmet_fc_invalidate_host() <- disables preempt
-> nvmet_fc_tgt_a_put()
-> nvmet_fc_target_assoc_free()
-> nvmet_fc_delete_target_queue()
drivers/nvme/target/fc.c
1546 void
1547 nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
1548 void *hosthandle)
1549 {
1550 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1551 struct nvmet_fc_tgt_assoc *assoc, *next;
1552 unsigned long flags;
1553 bool noassoc = true;
1554
1555 spin_lock_irqsave(&tgtport->lock, flags);
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Holding a spin lock.
1556 list_for_each_entry_safe(assoc, next,
1557 &tgtport->assoc_list, a_list) {
1558 if (assoc->hostport->hosthandle != hosthandle)
1559 continue;
1560 if (!nvmet_fc_tgt_a_get(assoc))
1561 continue;
1562 assoc->hostport->invalid = 1;
1563 noassoc = false;
1564 nvmet_fc_schedule_delete_assoc(assoc);
1565 nvmet_fc_tgt_a_put(assoc);
^^^^^^^^^^^^^^^^^^^^^^^^^
This will eventually sleep, I think.
1566 }
1567 spin_unlock_irqrestore(&tgtport->lock, flags);
1568
1569 /* if there's nothing to wait for - call the callback */
1570 if (noassoc && tgtport->ops->host_release)
1571 tgtport->ops->host_release(hosthandle);
1572 }
drivers/nvme/target/fc.c
877 static void
878 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
879 {
880 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
881 struct nvmet_fc_fcp_iod *fod = queue->fod;
882 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
883 unsigned long flags;
884 int i;
885 bool disconnect;
886
887 disconnect = atomic_xchg(&queue->connected, 0);
888
889 /* if not connected, nothing to do */
890 if (!disconnect)
891 return;
892
893 spin_lock_irqsave(&queue->qlock, flags);
894 /* abort outstanding io's */
895 for (i = 0; i < queue->sqsize; fod++, i++) {
896 if (fod->active) {
897 spin_lock(&fod->flock);
898 fod->abort = true;
899 /*
900 * only call lldd abort routine if waiting for
901 * writedata. other outstanding ops should finish
902 * on their own.
903 */
904 if (fod->writedataactive) {
905 fod->aborted = true;
906 spin_unlock(&fod->flock);
907 tgtport->ops->fcp_abort(
908 &tgtport->fc_target_port, fod->fcpreq);
909 } else
910 spin_unlock(&fod->flock);
911 }
912 }
913
914 /* Cleanup defer'ed IOs in queue */
915 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
916 req_list) {
917 list_del(&deferfcp->req_list);
918 kfree(deferfcp);
919 }
920
921 for (;;) {
922 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
923 struct nvmet_fc_defer_fcp_req, req_list);
924 if (!deferfcp)
925 break;
926
927 list_del(&deferfcp->req_list);
928 spin_unlock_irqrestore(&queue->qlock, flags);
929
930 tgtport->ops->defer_rcv(&tgtport->fc_target_port,
931 deferfcp->fcp_req);
932
933 tgtport->ops->fcp_abort(&tgtport->fc_target_port,
934 deferfcp->fcp_req);
935
936 tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
937 deferfcp->fcp_req);
938
939 /* release the queue lookup reference */
940 nvmet_fc_tgt_q_put(queue);
941
942 kfree(deferfcp);
943
944 spin_lock_irqsave(&queue->qlock, flags);
945 }
946 spin_unlock_irqrestore(&queue->qlock, flags);
947
948 flush_workqueue(queue->work_q);
949
--> 950 nvmet_sq_destroy(&queue->nvme_sq);
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Sleeps.
951
952 nvmet_fc_tgt_q_put(queue);
953 }
regards,
dan carpenter
More information about the Linux-nvme
mailing list