[linux-nvme:nvme-6.14 2/36] drivers/nvme/host/tcp.c:1583:18: warning: variable 'n' set but not used

From: kernel test robot <lkp@intel.com>
Date: Wed Jan 8 17:36:26 PST 2025


tree:   git://git.infradead.org/nvme.git nvme-6.14
head:   b9973aa4d0507c4969ad87763b535edb77b7dceb
commit: bd0f5c10310122f4f1d9468467a59a6f69e46f92 [2/36] nvme-tcp: Fix I/O queue cpu spreading for multiple controllers
config: s390-randconfig-001-20250109 (https://download.01.org/0day-ci/archive/20250109/202501090934.Dhw69M6j-lkp@intel.com/config)
compiler: s390-linux-gcc (GCC) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250109/202501090934.Dhw69M6j-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501090934.Dhw69M6j-lkp@intel.com/

All warnings (new ones prefixed by >>):

   drivers/nvme/host/tcp.c: In function 'nvme_tcp_set_queue_io_cpu':
>> drivers/nvme/host/tcp.c:1583:18: warning: variable 'n' set but not used [-Wunused-but-set-variable]
    1583 |         int cpu, n = 0, min_queues = INT_MAX, io_cpu;
         |                  ^
--
>> drivers/nvme/host/tcp.c:1578: warning: Function parameter or struct member 'queue' not described in 'nvme_tcp_set_queue_io_cpu'
>> drivers/nvme/host/tcp.c:1578: warning: expecting prototype for Track the number of queues assigned to each cpu using a global per(). Prototype was for nvme_tcp_set_queue_io_cpu() instead
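An aside on the two kernel-doc warnings: they are triggered because the comment
above nvme_tcp_set_queue_io_cpu() opens with "/**", which marks it as kernel-doc.
The parser then expects a "name() - summary" first line plus a description of the
@queue parameter, and finds neither. A minimal sketch of a conforming header (the
summary wording below is invented for illustration, not taken from the patch):

	/**
	 * nvme_tcp_set_queue_io_cpu() - pick the io_cpu for an I/O queue
	 * @queue: the nvme_tcp queue being set up
	 *
	 * Track the number of queues assigned to each cpu using a global per-cpu
	 * counter and select the least used cpu from the mq_map. Our goal is to
	 * spread different controllers I/O threads across different cpu cores.
	 *
	 * Note that the accounting is not 100% perfect, but we don't need to be,
	 * we're simply putting our best effort to select the best candidate cpu
	 * core that we find at any given point.
	 */

Alternatively, demoting the opener to a plain "/*" block comment makes kernel-doc
skip the comment entirely.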


vim +/n +1583 drivers/nvme/host/tcp.c

  1567	
  1568	/**
  1569	 * Track the number of queues assigned to each cpu using a global per-cpu
  1570	 * counter and select the least used cpu from the mq_map. Our goal is to spread
  1571	 * different controllers I/O threads across different cpu cores.
  1572	 *
  1573	 * Note that the accounting is not 100% perfect, but we don't need to be, we're
  1574	 * simply putting our best effort to select the best candidate cpu core that we
  1575	 * find at any given point.
  1576	 */
  1577	static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
> 1578	{
  1579		struct nvme_tcp_ctrl *ctrl = queue->ctrl;
  1580		struct blk_mq_tag_set *set = &ctrl->tag_set;
  1581		int qid = nvme_tcp_queue_id(queue) - 1;
  1582		unsigned int *mq_map;
> 1583		int cpu, n = 0, min_queues = INT_MAX, io_cpu;
  1584	
  1585		if (wq_unbound)
  1586			goto out;
  1587	
  1588		if (nvme_tcp_default_queue(queue)) {
  1589			mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map;
  1590			n = qid;
  1591		} else if (nvme_tcp_read_queue(queue)) {
  1592			mq_map = set->map[HCTX_TYPE_READ].mq_map;
  1593			n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT];
  1594		} else if (nvme_tcp_poll_queue(queue)) {
  1595			mq_map = set->map[HCTX_TYPE_POLL].mq_map;
  1596			n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
  1597					ctrl->io_queues[HCTX_TYPE_READ];
  1598		}
  1599		if (WARN_ON(!mq_map))
  1600			goto out;
  1601	
  1602		/* Search for the least used cpu from the mq_map */
  1603		io_cpu = WORK_CPU_UNBOUND;
  1604		for_each_online_cpu(cpu) {
  1605			int num_queues = atomic_read(&nvme_tcp_cpu_queues[cpu]);
  1606	
  1607			if (mq_map[cpu] != qid)
  1608				continue;
  1609			if (num_queues < min_queues) {
  1610				io_cpu = cpu;
  1611				min_queues = num_queues;
  1612			}
  1613		}
  1614		if (io_cpu != WORK_CPU_UNBOUND) {
  1615			queue->io_cpu = io_cpu;
  1616			atomic_inc(&nvme_tcp_cpu_queues[io_cpu]);
  1617			set_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags);
  1618		}
  1619	out:
  1620		dev_dbg(ctrl->ctrl.device, "queue %d: using cpu %d\n",
  1621			qid, queue->io_cpu);
  1622	}
  1623	
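Regarding the unused-variable warning itself: in this version of the function, n
is assigned in each queue-type branch but never read afterwards, since the
least-used-cpu scan over mq_map is what actually selects the core. The
assignments look like leftovers from an earlier selection scheme, so n can
simply be dropped. An untested sketch of such a cleanup follows (initializing
mq_map to NULL also keeps the WARN_ON(!mq_map) fallback well defined if no
branch matches):

--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
 	struct blk_mq_tag_set *set = &ctrl->tag_set;
 	int qid = nvme_tcp_queue_id(queue) - 1;
-	unsigned int *mq_map;
-	int cpu, n = 0, min_queues = INT_MAX, io_cpu;
+	unsigned int *mq_map = NULL;
+	int cpu, min_queues = INT_MAX, io_cpu;
 
 	if (wq_unbound)
 		goto out;
 
-	if (nvme_tcp_default_queue(queue)) {
+	if (nvme_tcp_default_queue(queue))
 		mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map;
-		n = qid;
-	} else if (nvme_tcp_read_queue(queue)) {
+	else if (nvme_tcp_read_queue(queue))
 		mq_map = set->map[HCTX_TYPE_READ].mq_map;
-		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT];
-	} else if (nvme_tcp_poll_queue(queue)) {
+	else if (nvme_tcp_poll_queue(queue))
 		mq_map = set->map[HCTX_TYPE_POLL].mq_map;
-		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
-				ctrl->io_queues[HCTX_TYPE_READ];
-	}
+
 	if (WARN_ON(!mq_map))
 		goto out;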

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki


