[linux-nvme:nvme-5.19 5/5] drivers/nvme/target/core.c:1379:23: error: 'struct nvmet_subsys' has no member named 'clear_ids'
kernel test robot
lkp@intel.com
Wed Jun 29 10:35:38 PDT 2022
tree: git://git.infradead.org/nvme.git nvme-5.19
head: 74138c25fae3f1989bef321bfffa324b1cb19dbc
commit: 74138c25fae3f1989bef321bfffa324b1cb19dbc [5/5] nvmet: add a clear_ids attribute for passthru targets
config: i386-randconfig-a003 (https://download.01.org/0day-ci/archive/20220630/202206300139.HO4t1e9v-lkp@intel.com/config)
compiler: gcc-11 (Debian 11.3.0-3) 11.3.0
reproduce (this is a W=1 build):
git remote add linux-nvme git://git.infradead.org/nvme.git
git fetch --no-tags linux-nvme nvme-5.19
git checkout 74138c25fae3f1989bef321bfffa324b1cb19dbc
# download the config from the URL in the "config:" line above and save it as ./config
mkdir build_dir && cp config build_dir/.config
make W=1 O=build_dir ARCH=i386 SHELL=/bin/bash drivers/nvme/target/
If you fix the issue, kindly add the following tag where applicable:
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
drivers/nvme/target/core.c: In function 'nvmet_alloc_ctrl':
>> drivers/nvme/target/core.c:1379:23: error: 'struct nvmet_subsys' has no member named 'clear_ids'
1379 | subsys->clear_ids = 1;
| ^~
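
This is an i386 randconfig build, which most likely has CONFIG_NVME_TARGET_PASSTHRU disabled. The commit under test adds clear_ids for passthru targets, so the new member is presumably declared inside the passthru-only section of struct nvmet_subsys in drivers/nvme/target/nvmet.h, along these lines (a sketch of the assumed layout; the neighboring fields are recalled from the mainline header and may differ in this tree):

    #ifdef CONFIG_NVME_TARGET_PASSTHRU
    	struct nvme_ctrl	*passthru_ctrl;
    	char			*passthru_ctrl_path;
    	struct config_group	passthru_group;
    	unsigned int		admin_timeout;
    	unsigned int		io_timeout;
    	unsigned int		clear_ids;	/* added by this commit (assumed placement) */
    #endif /* CONFIG_NVME_TARGET_PASSTHRU */

With the member hidden behind that #ifdef, the unconditional use in nvmet_alloc_ctrl() below cannot compile when passthru support is not configured.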
vim +1379 drivers/nvme/target/core.c
1337
1338 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1339 struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
1340 {
1341 struct nvmet_subsys *subsys;
1342 struct nvmet_ctrl *ctrl;
1343 int ret;
1344 u16 status;
1345
1346 status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1347 subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1348 if (!subsys) {
1349 pr_warn("connect request for invalid subsystem %s!\n",
1350 subsysnqn);
1351 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1352 req->error_loc = offsetof(struct nvme_common_command, dptr);
1353 goto out;
1354 }
1355
1356 down_read(&nvmet_config_sem);
1357 if (!nvmet_host_allowed(subsys, hostnqn)) {
1358 pr_info("connect by host %s for subsystem %s not allowed\n",
1359 hostnqn, subsysnqn);
1360 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
1361 up_read(&nvmet_config_sem);
1362 status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
1363 req->error_loc = offsetof(struct nvme_common_command, dptr);
1364 goto out_put_subsystem;
1365 }
1366 up_read(&nvmet_config_sem);
1367
1368 status = NVME_SC_INTERNAL;
1369 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1370 if (!ctrl)
1371 goto out_put_subsystem;
1372 mutex_init(&ctrl->lock);
1373
1374 ctrl->port = req->port;
1375 ctrl->ops = req->ops;
1376
1377 	/* By default, set loop targets to clear IDs */
1378 if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
> 1379 subsys->clear_ids = 1;
1380
1381 INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
1382 INIT_LIST_HEAD(&ctrl->async_events);
1383 INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
1384 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1385 INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
1386
1387 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
1388 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
1389
1390 kref_init(&ctrl->ref);
1391 ctrl->subsys = subsys;
1392 nvmet_init_cap(ctrl);
1393 WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
1394
1395 ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
1396 sizeof(__le32), GFP_KERNEL);
1397 if (!ctrl->changed_ns_list)
1398 goto out_free_ctrl;
1399
1400 ctrl->sqs = kcalloc(subsys->max_qid + 1,
1401 sizeof(struct nvmet_sq *),
1402 GFP_KERNEL);
1403 if (!ctrl->sqs)
1404 goto out_free_changed_ns_list;
1405
1406 if (subsys->cntlid_min > subsys->cntlid_max)
1407 goto out_free_sqs;
1408
1409 ret = ida_alloc_range(&cntlid_ida,
1410 subsys->cntlid_min, subsys->cntlid_max,
1411 GFP_KERNEL);
1412 if (ret < 0) {
1413 status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
1414 goto out_free_sqs;
1415 }
1416 ctrl->cntlid = ret;
1417
1418 /*
1419 * Discovery controllers may use some arbitrary high value
1420 * in order to cleanup stale discovery sessions
1421 */
1422 if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
1423 kato = NVMET_DISC_KATO_MS;
1424
1425 /* keep-alive timeout in seconds */
1426 ctrl->kato = DIV_ROUND_UP(kato, 1000);
1427
1428 ctrl->err_counter = 0;
1429 spin_lock_init(&ctrl->error_lock);
1430
1431 nvmet_start_keep_alive_timer(ctrl);
1432
1433 mutex_lock(&subsys->lock);
1434 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
1435 nvmet_setup_p2p_ns_map(ctrl, req);
1436 mutex_unlock(&subsys->lock);
1437
1438 *ctrlp = ctrl;
1439 return 0;
1440
1441 out_free_sqs:
1442 kfree(ctrl->sqs);
1443 out_free_changed_ns_list:
1444 kfree(ctrl->changed_ns_list);
1445 out_free_ctrl:
1446 kfree(ctrl);
1447 out_put_subsystem:
1448 nvmet_subsys_put(subsys);
1449 out:
1450 return status;
1451 }
1452
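If the member is indeed declared under CONFIG_NVME_TARGET_PASSTHRU, one plausible fix is to compile the loop-target default only when passthru support is configured (a sketch, not necessarily the patch that was actually applied):

    	ctrl->port = req->port;
    	ctrl->ops = req->ops;

    #ifdef CONFIG_NVME_TARGET_PASSTHRU
    	/* By default, set loop targets to clear IDs */
    	if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
    		subsys->clear_ids = 1;
    #endif

Alternatively, clear_ids could be moved out of the #ifdef block in nvmet.h so that the unconditional assignment builds in every configuration.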
--
0-DAY CI Kernel Test Service
https://01.org/lkp