[PATCH] nvme-fcloop: fix "inconsistent {IN-HARDIRQ-W} -> {HARDIRQ-ON-W} usage"

Yi Zhang yi.zhang at redhat.com
Wed Apr 12 09:53:32 PDT 2023


Thanks Ming, the issue below was fixed by this patch; feel free to add:

Reported-by: Yi Zhang <yi.zhang at redhat.com>
Tested-by: Yi Zhang <yi.zhang at redhat.com>

[ 6238.578857] run blktests nvme/012 at 2023-04-12 07:47:19
[ 6240.616008] ------------[ cut here ]------------
[ 6240.620657] raw_local_irq_restore() called with IRQs enabled
[ 6240.626331] WARNING: CPU: 25 PID: 184 at kernel/locking/irqflag-debug.c:10 warn_bogus_irq_restore+0x21/0x30
[ 6240.636082] Modules linked in: loop nvme_fcloop nvmet_fc nvmet
nvme_fc nvme_fabrics nvme_core nvme_common rpcsec_gss_krb5 auth_rpcgss
nfsv4 dns_resolver nfs lockd grace fscache netfs rfkill sunrpc vfat
fat dm_multipath intel_rapl_msr intel_rapl_common
intel_uncore_frequency intel_uncore_frequency_common isst_if_common
skx_edac x86_pkg_temp_thermal intel_powerclamp coretemp kvm_intel kvm
ipmi_ssif mgag200 i2c_algo_bit drm_shmem_helper drm_kms_helper
irqbypass iTCO_wdt syscopyarea rapl mei_me iTCO_vendor_support
sysfillrect dell_smbios intel_cstate dcdbas wmi_bmof
dell_wmi_descriptor intel_uncore acpi_ipmi pcspkr sysimgblt mei
i2c_i801 ipmi_si lpc_ich i2c_smbus ipmi_devintf ipmi_msghandler
dax_pmem acpi_power_meter drm fuse xfs libcrc32c nd_pmem nd_btt sd_mod
t10_pi sg crct10dif_pclmul crc32_pclmul crc32c_intel ahci libahci
ghash_clmulni_intel tg3 megaraid_sas libata wmi nfit libnvdimm
dm_mirror dm_region_hash dm_log dm_mod
[ 6240.717780] CPU: 25 PID: 184 Comm: ksoftirqd/25 Kdump: loaded Not tainted 6.3.0-rc5+ #1
[ 6240.725787] Hardware name: Dell Inc. PowerEdge R640/06NR82, BIOS 2.17.1 11/15/2022
[ 6240.733360] RIP: 0010:warn_bogus_irq_restore+0x21/0x30
[ 6240.738509] Code: 90 90 90 90 90 90 90 90 90 80 3d fa 4d d8 01 00
74 05 c3 cc cc cc cc 48 c7 c7 a0 29 ce 8c c6 05 e5 4d d8 01 01 e8 df
d1 c7 fd <0f> 0b c3 cc cc cc cc cc cc cc cc cc cc cc cc 90 90 90 90 90
90 90
[ 6240.757264] RSP: 0018:ffffc90001257d60 EFLAGS: 00010286
[ 6240.762506] RAX: 0000000000000000 RBX: 0000000000000200 RCX: 0000000000000000
[ 6240.769647] RDX: 0000000000000103 RSI: ffffffff8cf64b60 RDI: 0000000000000001
[ 6240.776787] RBP: ffff8884dd61a048 R08: 0000000000000001 R09: ffffc90001257b5f
[ 6240.783930] R10: fffff5200024af6b R11: 0000000000000001 R12: ffff8884dd61a010
[ 6240.791070] R13: dffffc0000000000 R14: ffff8884dd619f50 R15: ffff8884dd619f50
[ 6240.798210] FS:  0000000000000000(0000) GS:ffff888efe400000(0000) knlGS:0000000000000000
[ 6240.806306] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 6240.812066] CR2: 000055c5feb5c0a0 CR3: 00000003a0a32002 CR4: 00000000007706e0
[ 6240.819211] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 6240.826350] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[ 6240.833493] PKRU: 55555554
[ 6240.836215] Call Trace:
[ 6240.838674]  <TASK>
[ 6240.840791]  _raw_spin_unlock_irqrestore+0x6c/0x70
[ 6240.845600]  flush_end_io+0x88f/0xb70
[ 6240.849290]  ? __pfx_flush_end_io+0x10/0x10
[ 6240.853486]  blk_mq_end_request+0x145/0x400
[ 6240.857686]  blk_complete_reqs+0xa7/0xe0
[ 6240.861626]  __do_softirq+0x200/0x8f6
[ 6240.865311]  ? __pfx_run_ksoftirqd+0x10/0x10
[ 6240.869600]  ? smpboot_thread_fn+0x6b/0x910
[ 6240.873799]  run_ksoftirqd+0x36/0x60
[ 6240.877388]  smpboot_thread_fn+0x556/0x910
[ 6240.881498]  ? __pfx_smpboot_thread_fn+0x10/0x10
[ 6240.886134]  kthread+0x29f/0x340
[ 6240.889381]  ? __pfx_kthread+0x10/0x10
[ 6240.893145]  ret_from_fork+0x29/0x50
[ 6240.896756]  </TASK>
[ 6240.898959] irq event stamp: 715386
[ 6240.902459] hardirqs last  enabled at (715396): [<ffffffff8a7f600b>] __up_console_sem+0x6b/0x80
[ 6240.911160] hardirqs last disabled at (715405): [<ffffffff8a7f5ff0>] __up_console_sem+0x50/0x80
[ 6240.919862] softirqs last  enabled at (714456): [<ffffffff8c9e6d9b>] __do_softirq+0x5db/0x8f6
[ 6240.928388] softirqs last disabled at (714461): [<ffffffff8a646b16>] run_ksoftirqd+0x36/0x60
[ 6240.936828] ---[ end trace 0000000000000000 ]---
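
Ming's patch below avoids the splat by switching tfcp_req->reqlock to the
irqsave/irqrestore variants. For reference, here is a minimal sketch of the
hazard the lockdep trace points at (hypothetical lock names, not the actual
driver code): spin_unlock_irq() re-enables IRQs unconditionally, so a callee
must not use it when its caller may already hold IRQs disabled.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);	/* stands in for fq->mq_flush_lock */
static DEFINE_SPINLOCK(inner_lock);	/* stands in for tfcp_req->reqlock */

static void callee(void)
{
	spin_lock_irq(&inner_lock);
	/* ... */
	spin_unlock_irq(&inner_lock);	/* bug: turns IRQs back on unconditionally */
}

static void caller(void)
{
	unsigned long flags;

	spin_lock_irqsave(&outer_lock, flags);	/* IRQs off, previous state saved */
	callee();				/* on return, IRQs are enabled again */
	spin_unlock_irqrestore(&outer_lock, flags);
	/*
	 * The restore runs with IRQs already enabled, which is exactly
	 * what warn_bogus_irq_restore() complains about in the trace above.
	 */
}

With spin_lock_irqsave()/spin_unlock_irqrestore() in the callee, the caller's
IRQ state is preserved no matter which context the callee is entered from.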
On Wed, Apr 12, 2023 at 4:52 PM Ming Lei <ming.lei at redhat.com> wrote:
>
> fcloop_fcp_op() can be called from a flush request's ->end_io() handler
> (flush_end_io), where fq->mq_flush_lock is held with IRQs saved and
> disabled.
>
> So fcloop_fcp_op() must not simply call spin_unlock_irq(&tfcp_req->reqlock),
> which re-enables IRQs unconditionally.
>
> Fix the warning by switching to spin_lock_irqsave()/spin_unlock_irqrestore().
>
> Fixes: c38dbbfab1bc ("nvme-fcloop: fix inconsistent lock state warnings")
> Cc: James Smart <jsmart2021 at gmail.com>
> Cc: Minwoo Im <minwoo.im.dev at gmail.com>
> Signed-off-by: Ming Lei <ming.lei at redhat.com>
> ---
>  drivers/nvme/target/fcloop.c | 48 ++++++++++++++++++++----------------
>  1 file changed, 27 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
> index 5c16372f3b53..c780af36c1d4 100644
> --- a/drivers/nvme/target/fcloop.c
> +++ b/drivers/nvme/target/fcloop.c
> @@ -614,10 +614,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
>         struct fcloop_fcpreq *tfcp_req =
>                 container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
>         struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
> +       unsigned long flags;
>         int ret = 0;
>         bool aborted = false;
>
> -       spin_lock_irq(&tfcp_req->reqlock);
> +       spin_lock_irqsave(&tfcp_req->reqlock, flags);
>         switch (tfcp_req->inistate) {
>         case INI_IO_START:
>                 tfcp_req->inistate = INI_IO_ACTIVE;
> @@ -626,11 +627,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
>                 aborted = true;
>                 break;
>         default:
> -               spin_unlock_irq(&tfcp_req->reqlock);
> +               spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
>                 WARN_ON(1);
>                 return;
>         }
> -       spin_unlock_irq(&tfcp_req->reqlock);
> +       spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
>
>         if (unlikely(aborted))
>                 ret = -ECANCELED;
> @@ -655,8 +656,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
>                 container_of(work, struct fcloop_fcpreq, abort_rcv_work);
>         struct nvmefc_fcp_req *fcpreq;
>         bool completed = false;
> +       unsigned long flags;
>
> -       spin_lock_irq(&tfcp_req->reqlock);
> +       spin_lock_irqsave(&tfcp_req->reqlock, flags);
>         fcpreq = tfcp_req->fcpreq;
>         switch (tfcp_req->inistate) {
>         case INI_IO_ABORTED:
> @@ -665,11 +667,11 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
>                 completed = true;
>                 break;
>         default:
> -               spin_unlock_irq(&tfcp_req->reqlock);
> +               spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
>                 WARN_ON(1);
>                 return;
>         }
> -       spin_unlock_irq(&tfcp_req->reqlock);
> +       spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
>
>         if (unlikely(completed)) {
>                 /* remove reference taken in original abort downcall */
> @@ -681,9 +683,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
>                 nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
>                                         &tfcp_req->tgt_fcp_req);
>
> -       spin_lock_irq(&tfcp_req->reqlock);
> +       spin_lock_irqsave(&tfcp_req->reqlock, flags);
>         tfcp_req->fcpreq = NULL;
> -       spin_unlock_irq(&tfcp_req->reqlock);
> +       spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
>
>         fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
>         /* call_host_done releases reference for abort downcall */
> @@ -699,11 +701,12 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
>         struct fcloop_fcpreq *tfcp_req =
>                 container_of(work, struct fcloop_fcpreq, tio_done_work);
>         struct nvmefc_fcp_req *fcpreq;
> +       unsigned long flags;
>
> -       spin_lock_irq(&tfcp_req->reqlock);
> +       spin_lock_irqsave(&tfcp_req->reqlock, flags);
>         fcpreq = tfcp_req->fcpreq;
>         tfcp_req->inistate = INI_IO_COMPLETED;
> -       spin_unlock_irq(&tfcp_req->reqlock);
> +       spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
>
>         fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
>  }
> @@ -807,13 +810,14 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
>         u32 rsplen = 0, xfrlen = 0;
>         int fcp_err = 0, active, aborted;
>         u8 op = tgt_fcpreq->op;
> +       unsigned long flags;
>
> -       spin_lock_irq(&tfcp_req->reqlock);
> +       spin_lock_irqsave(&tfcp_req->reqlock, flags);
>         fcpreq = tfcp_req->fcpreq;
>         active = tfcp_req->active;
>         aborted = tfcp_req->aborted;
>         tfcp_req->active = true;
> -       spin_unlock_irq(&tfcp_req->reqlock);
> +       spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
>
>         if (unlikely(active))
>                 /* illegal - call while i/o active */
> @@ -821,9 +825,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
>
>         if (unlikely(aborted)) {
>                 /* target transport has aborted i/o prior */
> -               spin_lock_irq(&tfcp_req->reqlock);
> +               spin_lock_irqsave(&tfcp_req->reqlock, flags);
>                 tfcp_req->active = false;
> -               spin_unlock_irq(&tfcp_req->reqlock);
> +               spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
>                 tgt_fcpreq->transferred_length = 0;
>                 tgt_fcpreq->fcp_error = -ECANCELED;
>                 tgt_fcpreq->done(tgt_fcpreq);
> @@ -880,9 +884,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
>                 break;
>         }
>
> -       spin_lock_irq(&tfcp_req->reqlock);
> +       spin_lock_irqsave(&tfcp_req->reqlock, flags);
>         tfcp_req->active = false;
> -       spin_unlock_irq(&tfcp_req->reqlock);
> +       spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
>
>         tgt_fcpreq->transferred_length = xfrlen;
>         tgt_fcpreq->fcp_error = fcp_err;
> @@ -896,15 +900,16 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
>                         struct nvmefc_tgt_fcp_req *tgt_fcpreq)
>  {
>         struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
> +       unsigned long flags;
>
>         /*
>          * mark aborted only in case there were 2 threads in transport
>          * (one doing io, other doing abort) and only kills ops posted
>          * after the abort request
>          */
> -       spin_lock_irq(&tfcp_req->reqlock);
> +       spin_lock_irqsave(&tfcp_req->reqlock, flags);
>         tfcp_req->aborted = true;
> -       spin_unlock_irq(&tfcp_req->reqlock);
> +       spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
>
>         tfcp_req->status = NVME_SC_INTERNAL;
>
> @@ -946,6 +951,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
>         struct fcloop_ini_fcpreq *inireq = fcpreq->private;
>         struct fcloop_fcpreq *tfcp_req;
>         bool abortio = true;
> +       unsigned long flags;
>
>         spin_lock(&inireq->inilock);
>         tfcp_req = inireq->tfcp_req;
> @@ -958,7 +964,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
>                 return;
>
>         /* break initiator/target relationship for io */
> -       spin_lock_irq(&tfcp_req->reqlock);
> +       spin_lock_irqsave(&tfcp_req->reqlock, flags);
>         switch (tfcp_req->inistate) {
>         case INI_IO_START:
>         case INI_IO_ACTIVE:
> @@ -968,11 +974,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
>                 abortio = false;
>                 break;
>         default:
> -               spin_unlock_irq(&tfcp_req->reqlock);
> +               spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
>                 WARN_ON(1);
>                 return;
>         }
> -       spin_unlock_irq(&tfcp_req->reqlock);
> +       spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
>
>         if (abortio)
>                 /* leave the reference while the work item is scheduled */
> --
> 2.31.1
>
>


-- 
Best Regards,
  Yi Zhang



