[bug report] afs: Add support for RENAME_NOREPLACE and RENAME_EXCHANGE
Dan Carpenter
dan.carpenter at linaro.org
Thu Sep 18 10:51:31 PDT 2025
Hello David Howells,
Commit 09c69289a273 ("afs: Add support for RENAME_NOREPLACE and
RENAME_EXCHANGE") from Sep 3, 2025 (linux-next), leads to the
following Smatch static checker warning:
fs/afs/rotate.c:637 afs_select_fileserver() error: uninitialized symbol 'addr_index'.
fs/afs/rotate.c:637 afs_select_fileserver() error: uninitialized symbol 'alist'.
fs/afs/rotate.c
152 bool afs_select_fileserver(struct afs_operation *op)
153 {
154 struct afs_addr_list *alist;
155 struct afs_server *server;
156 struct afs_vnode *vnode = op->file[0].vnode;
157 unsigned long set, failed;
158 s32 abort_code = op->call_abort_code;
159 int best_prio = 0;
160 int error = op->call_error, addr_index, i, j;
161
162 op->nr_iterations++;
163
164 _enter("OP=%x+%x,%llx,%u{%lx},%u{%lx},%d,%d",
165 op->debug_id, op->nr_iterations, op->volume->vid,
166 op->server_index, op->untried_servers,
167 op->addr_index, op->addr_tried,
168 error, abort_code);
169
170 if (op->flags & AFS_OPERATION_STOP) {
171 trace_afs_rotate(op, afs_rotate_trace_stopped, 0);
172 _leave(" = f [stopped]");
173 return false;
174 }
175
176 if (op->nr_iterations == 0)
177 goto start;
178
179 WRITE_ONCE(op->estate->addresses->addrs[op->addr_index].last_error, error);
180 trace_afs_rotate(op, afs_rotate_trace_iter, op->call_error);
181
182 /* Evaluate the result of the previous operation, if there was one. */
183 switch (op->call_error) {
184 case 0:
185 clear_bit(AFS_SE_VOLUME_OFFLINE,
186 &op->server_list->servers[op->server_index].flags);
187 clear_bit(AFS_SE_VOLUME_BUSY,
188 &op->server_list->servers[op->server_index].flags);
189 op->cumul_error.responded = true;
190
191 /* We succeeded, but we may need to redo the op from another
192 * server if we're looking at a set of RO volumes where some of
193 * the servers have not yet been brought up to date lest we
194 * regress the data. We only switch to the new version once
195 * >=50% of the servers are updated.
196 */
197 error = afs_update_volume_state(op);
198 if (error != 0) {
199 if (error == 1) {
200 afs_sleep_and_retry(op);
201 goto restart_from_beginning;
202 }
203 afs_op_set_error(op, error);
204 goto failed;
205 }
206 fallthrough;
207 default:
208 /* Success or local failure. Stop. */
209 afs_op_set_error(op, error);
210 op->flags |= AFS_OPERATION_STOP;
211 trace_afs_rotate(op, afs_rotate_trace_stop, error);
212 _leave(" = f [okay/local %d]", error);
213 return false;
214
215 case -ECONNABORTED:
216 /* The far side rejected the operation on some grounds. This
217 * might involve the server being busy or the volume having been moved.
218 *
219 * Note that various V* errors should not be sent to a cache manager
220 * by a fileserver as they should be translated to more modern UAE*
221 * errors instead. IBM AFS and OpenAFS fileservers, however, do leak
222 * these abort codes.
223 */
224 trace_afs_rotate(op, afs_rotate_trace_aborted, abort_code);
225 op->cumul_error.responded = true;
226 switch (abort_code) {
227 case VNOVOL:
228 /* This fileserver doesn't know about the volume.
229 * - May indicate that the VL is wrong - retry once and compare
230 * the results.
231 * - May indicate that the fileserver couldn't attach to the vol.
232 * - The volume might have been temporarily removed so that it can
233 * be replaced by a volume restore. "vos" might have ended one
234 * transaction and has yet to create the next.
235 * - The volume might not be blessed or might not be in-service
236 * (administrative action).
237 */
238 if (op->flags & AFS_OPERATION_VNOVOL) {
239 afs_op_accumulate_error(op, -EREMOTEIO, abort_code);
240 goto next_server;
241 }
242
243 write_lock(&op->volume->servers_lock);
244 op->server_list->vnovol_mask |= 1 << op->server_index;
245 write_unlock(&op->volume->servers_lock);
246
247 set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags);
248 error = afs_check_volume_status(op->volume, op);
249 if (error < 0) {
250 afs_op_set_error(op, error);
251 goto failed;
252 }
253
254 if (test_bit(AFS_VOLUME_DELETED, &op->volume->flags)) {
255 afs_op_set_error(op, -ENOMEDIUM);
256 goto failed;
257 }
258
259 /* If the server list didn't change, then assume that
260 * it's the fileserver having trouble.
261 */
262 if (rcu_access_pointer(op->volume->servers) == op->server_list) {
263 afs_op_accumulate_error(op, -EREMOTEIO, abort_code);
264 goto next_server;
265 }
266
267 /* Try again */
268 op->flags |= AFS_OPERATION_VNOVOL;
269 _leave(" = t [vnovol]");
270 return true;
271
272 case VVOLEXISTS:
273 case VONLINE:
274 /* These should not be returned from the fileserver. */
275 pr_warn("Fileserver returned unexpected abort %d\n",
276 abort_code);
277 afs_op_accumulate_error(op, -EREMOTEIO, abort_code);
278 goto next_server;
279
280 case VNOSERVICE:
281 /* Prior to AFS 3.2 VNOSERVICE was returned from the fileserver
282 * if the volume was neither in-service nor administratively
283 * blessed. All usage was replaced by VNOVOL because AFS 3.1 and
284 * earlier cache managers did not handle VNOSERVICE and assumed
285 * it was the client OSes errno 105.
286 *
287 * Starting with OpenAFS 1.4.8 VNOSERVICE was repurposed as the
288 * fileserver idle dead time error which was sent in place of
289 * RX_CALL_TIMEOUT (-3). The error was intended to be sent if the
290 * fileserver took too long to send a reply to the client.
291 * RX_CALL_TIMEOUT would have caused the cache manager to mark the
292 * server down whereas VNOSERVICE since AFS 3.2 would cause cache
293 * manager to temporarily (up to 15 minutes) mark the volume
294 * instance as unusable.
295 *
296 * The idle dead logic resulted in cache inconsistency since a
297 * state changing call that the cache manager assumed was dead
298 * could still be processed to completion by the fileserver. This
299 * logic was removed in OpenAFS 1.8.0 and VNOSERVICE is no longer
300 * returned. However, many 1.4.8 through 1.6.24 fileservers are
301 * still in existence.
302 *
303 * AuriStorFS fileservers have never returned VNOSERVICE.
304 *
305 * VNOSERVICE should be treated as an alias for RX_CALL_TIMEOUT.
306 */
307 case RX_CALL_TIMEOUT:
308 afs_op_accumulate_error(op, -ETIMEDOUT, abort_code);
309 goto next_server;
310
311 case VSALVAGING: /* This error should not be leaked to cache managers
312 * but is from OpenAFS demand attach fileservers.
313 * It should be treated as an alias for VOFFLINE.
314 */
315 case VSALVAGE: /* VSALVAGE should be treated as a synonym of VOFFLINE */
316 case VOFFLINE:
317 /* The volume is in use by the volserver or another volume utility
318 * for an operation that might alter the contents. The volume is
319 * expected to come back but it might take a long time (could be
320 * days).
321 */
322 if (!test_and_set_bit(AFS_SE_VOLUME_OFFLINE,
323 &op->server_list->servers[op->server_index].flags)) {
324 afs_busy(op, abort_code);
325 clear_bit(AFS_SE_VOLUME_BUSY,
326 &op->server_list->servers[op->server_index].flags);
327 }
328 if (op->flags & AFS_OPERATION_NO_VSLEEP) {
329 afs_op_set_error(op, -EADV);
330 goto failed;
331 }
332 goto busy;
333
334 case VRESTARTING: /* The fileserver is either shutting down or starting up. */
335 case VBUSY:
336 /* The volume is in use by the volserver or another volume
337 * utility for an operation that is not expected to alter the
338 * contents of the volume. VBUSY does not need to be returned
339 * for a ROVOL or BACKVOL bound to an ITBusy volserver
340 * transaction. The fileserver is permitted to continue serving
341 * content from ROVOLs and BACKVOLs during an ITBusy transaction
342 * because the content will not change. However, many fileserver
343 * releases do return VBUSY for ROVOL and BACKVOL instances under
344 * many circumstances.
345 *
346 * Retry after going round all the servers unless we have a file
347 * lock we need to maintain.
348 */
349 if (op->flags & AFS_OPERATION_NO_VSLEEP) {
350 afs_op_set_error(op, -EBUSY);
351 goto failed;
352 }
353 if (!test_and_set_bit(AFS_SE_VOLUME_BUSY,
354 &op->server_list->servers[op->server_index].flags)) {
355 afs_busy(op, abort_code);
356 clear_bit(AFS_SE_VOLUME_OFFLINE,
357 &op->server_list->servers[op->server_index].flags);
358 }
359 busy:
360 if (op->flags & AFS_OPERATION_CUR_ONLY) {
361 if (!afs_sleep_and_retry(op))
362 goto failed;
363
364 /* Retry with same server & address */
365 _leave(" = t [vbusy]");
366 return true;
367 }
368
369 op->flags |= AFS_OPERATION_VBUSY;
370 goto next_server;
371
372 case VMOVED:
373 /* The volume migrated to another server. We consider
374 * consider all locks and callbacks broken and request
375 * an update from the VLDB.
376 *
377 * We also limit the number of VMOVED hops we will
378 * honour, just in case someone sets up a loop.
379 */
380 if (op->flags & AFS_OPERATION_VMOVED) {
381 afs_op_set_error(op, -EREMOTEIO);
382 goto failed;
383 }
384 op->flags |= AFS_OPERATION_VMOVED;
385
386 set_bit(AFS_VOLUME_WAIT, &op->volume->flags);
387 set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags);
388 error = afs_check_volume_status(op->volume, op);
389 if (error < 0) {
390 afs_op_set_error(op, error);
391 goto failed;
392 }
393
394 /* If the server list didn't change, then the VLDB is
395 * out of sync with the fileservers. This is hopefully
396 * a temporary condition, however, so we don't want to
397 * permanently block access to the file.
398 *
399 * TODO: Try other fileservers if we can.
400 *
401 * TODO: Retry a few times with sleeps.
402 */
403 if (rcu_access_pointer(op->volume->servers) == op->server_list) {
404 afs_op_accumulate_error(op, -ENOMEDIUM, abort_code);
405 goto failed;
406 }
407
408 goto restart_from_beginning;
409
410 case UAEIO:
411 case VIO:
412 afs_op_accumulate_error(op, -EREMOTEIO, abort_code);
413 if (op->volume->type != AFSVL_RWVOL)
414 goto next_server;
415 goto failed;
416
417 case VDISKFULL:
418 case UAENOSPC:
419 /* The partition is full. Only applies to RWVOLs.
420 * Translate locally and return ENOSPC.
421 * No replicas to failover to.
422 */
423 afs_op_set_error(op, -ENOSPC);
424 goto failed_but_online;
425
426 case VOVERQUOTA:
427 case UAEDQUOT:
428 /* Volume is full. Only applies to RWVOLs.
429 * Translate locally and return EDQUOT.
430 * No replicas to failover to.
431 */
432 afs_op_set_error(op, -EDQUOT);
433 goto failed_but_online;
434
435 case RX_INVALID_OPERATION:
436 case RXGEN_OPCODE:
437 /* Handle downgrading to an older operation. */
438 afs_op_set_error(op, -ENOTSUPP);
439 if (op->flags & AFS_OPERATION_DOWNGRADE) {
440 op->flags &= ~AFS_OPERATION_DOWNGRADE;
441 goto go_again;
Neither addr_index nor alist is set on this path: jumping from here to the
go_again label skips the initialization done under iterate_address, so both
are used uninitialized at line 637-639.
442 }
443 goto failed_but_online;
444
445 default:
446 afs_op_accumulate_error(op, error, abort_code);
447 failed_but_online:
448 clear_bit(AFS_SE_VOLUME_OFFLINE,
449 &op->server_list->servers[op->server_index].flags);
450 clear_bit(AFS_SE_VOLUME_BUSY,
451 &op->server_list->servers[op->server_index].flags);
452 goto failed;
453 }
454
455 case -ETIMEDOUT:
456 case -ETIME:
457 if (afs_op_error(op) != -EDESTADDRREQ)
458 goto iterate_address;
459 fallthrough;
460 case -ERFKILL:
461 case -EADDRNOTAVAIL:
462 case -ENETUNREACH:
463 case -EHOSTUNREACH:
464 case -EHOSTDOWN:
465 case -ECONNREFUSED:
466 _debug("no conn");
467 afs_op_accumulate_error(op, error, 0);
468 goto iterate_address;
469
470 case -ENETRESET:
471 pr_warn("kAFS: Peer reset %s (op=%x)\n",
472 op->type ? op->type->name : "???", op->debug_id);
473 fallthrough;
474 case -ECONNRESET:
475 _debug("call reset");
476 afs_op_set_error(op, error);
477 goto failed;
478 }
479
480 restart_from_beginning:
481 trace_afs_rotate(op, afs_rotate_trace_restart, 0);
482 _debug("restart");
483 op->estate = NULL;
484 op->server = NULL;
485 afs_clear_server_states(op);
486 op->server_states = NULL;
487 afs_put_serverlist(op->net, op->server_list);
488 op->server_list = NULL;
489 start:
490 _debug("start");
491 ASSERTCMP(op->estate, ==, NULL);
492 /* See if we need to do an update of the volume record. Note that the
493 * volume may have moved or even have been deleted.
494 */
495 error = afs_check_volume_status(op->volume, op);
496 trace_afs_rotate(op, afs_rotate_trace_check_vol_status, error);
497 if (error < 0) {
498 afs_op_set_error(op, error);
499 goto failed;
500 }
501
502 if (!afs_start_fs_iteration(op, vnode))
503 goto failed;
504
505 _debug("__ VOL %llx __", op->volume->vid);
506
507 pick_server:
508 _debug("pick [%lx]", op->untried_servers);
509 ASSERTCMP(op->estate, ==, NULL);
510
511 error = afs_wait_for_fs_probes(op, op->server_states,
512 !(op->flags & AFS_OPERATION_UNINTR));
513 switch (error) {
514 case 0: /* No untried responsive servers and no outstanding probes */
515 trace_afs_rotate(op, afs_rotate_trace_probe_none, 0);
516 goto no_more_servers;
517 case 1: /* Got a response */
518 trace_afs_rotate(op, afs_rotate_trace_probe_response, 0);
519 break;
520 case 2: /* Probe data superseded */
521 trace_afs_rotate(op, afs_rotate_trace_probe_superseded, 0);
522 goto restart_from_beginning;
523 default:
524 trace_afs_rotate(op, afs_rotate_trace_probe_error, error);
525 afs_op_set_error(op, error);
526 goto failed;
527 }
528
529 /* Pick the untried server with the highest priority untried endpoint.
530 * If we have outstanding callbacks, we stick with the server we're
531 * already using if we can.
532 */
533 if (op->server) {
534 _debug("server %u", op->server_index);
535 if (test_bit(op->server_index, &op->untried_servers))
536 goto selected_server;
537 op->server = NULL;
538 _debug("no server");
539 }
540
541 rcu_read_lock();
542 op->server_index = -1;
543 best_prio = -1;
544 for (i = 0; i < op->server_list->nr_servers; i++) {
545 struct afs_endpoint_state *es;
546 struct afs_server_entry *se = &op->server_list->servers[i];
547 struct afs_addr_list *sal;
548 struct afs_server *s = se->server;
549
550 if (!test_bit(i, &op->untried_servers) ||
551 test_bit(AFS_SE_EXCLUDED, &se->flags) ||
552 !test_bit(AFS_SERVER_FL_RESPONDING, &s->flags))
553 continue;
554 es = op->server_states[i].endpoint_state;
555 sal = es->addresses;
556
557 afs_get_address_preferences_rcu(op->net, sal);
558 for (j = 0; j < sal->nr_addrs; j++) {
559 if (es->failed_set & (1 << j))
560 continue;
561 if (!sal->addrs[j].peer)
562 continue;
563 if (sal->addrs[j].prio > best_prio) {
564 op->server_index = i;
565 best_prio = sal->addrs[j].prio;
566 }
567 }
568 }
569 rcu_read_unlock();
570
571 if (op->server_index == -1)
572 goto no_more_servers;
573
574 selected_server:
575 trace_afs_rotate(op, afs_rotate_trace_selected_server, best_prio);
576 _debug("use %d prio %u", op->server_index, best_prio);
577 __clear_bit(op->server_index, &op->untried_servers);
578
579 /* We're starting on a different fileserver from the list. We need to
580 * check it, create a callback intercept, find its address list and
581 * probe its capabilities before we use it.
582 */
583 ASSERTCMP(op->estate, ==, NULL);
584 server = op->server_list->servers[op->server_index].server;
585
586 if (!afs_check_server_record(op, server, op->key))
587 goto failed;
588
589 _debug("USING SERVER: %pU", &server->uuid);
590
591 op->flags |= AFS_OPERATION_RETRY_SERVER;
592 op->server = server;
593 if (vnode->cb_server != server) {
594 vnode->cb_server = server;
595 vnode->cb_v_check = atomic_read(&vnode->volume->cb_v_break);
596 afs_clear_cb_promise(vnode, afs_cb_promise_clear_server_change);
597 }
598
599 retry_server:
600 op->addr_tried = 0;
601 op->addr_index = -1;
602
603 iterate_address:
604 /* Iterate over the current server's address list to try and find an
605 * address on which it will respond to us.
606 */
607 op->estate = op->server_states[op->server_index].endpoint_state;
608 set = READ_ONCE(op->estate->responsive_set);
609 failed = READ_ONCE(op->estate->failed_set);
610 _debug("iterate ES=%x rs=%lx fs=%lx", op->estate->probe_seq, set, failed);
611 set &= ~(failed | op->addr_tried);
612 trace_afs_rotate(op, afs_rotate_trace_iterate_addr, set);
613 if (!set)
614 goto wait_for_more_probe_results;
615
616 alist = op->estate->addresses;
617 best_prio = -1;
618 addr_index = 0;
619 for (i = 0; i < alist->nr_addrs; i++) {
620 if (!(set & (1 << i)))
621 continue;
622 if (alist->addrs[i].prio > best_prio) {
623 addr_index = i;
624 best_prio = alist->addrs[i].prio;
625 }
626 }
627
628 alist->preferred = addr_index;
629
630 op->addr_index = addr_index;
631 set_bit(addr_index, &op->addr_tried);
632
633 go_again:
634 op->volsync.creation = TIME64_MIN;
635 op->volsync.update = TIME64_MIN;
636 op->call_responded = false;
--> 637 _debug("address [%u] %u/%u %pISp",
638 op->server_index, addr_index, alist->nr_addrs,
^^^^^^^^^^
639 rxrpc_kernel_remote_addr(alist->addrs[op->addr_index].peer));
640 _leave(" = t");
641 return true;
642
643 wait_for_more_probe_results:
644 error = afs_wait_for_one_fs_probe(op->server, op->estate, op->addr_tried,
645 !(op->flags & AFS_OPERATION_UNINTR));
646 if (error == 1)
647 goto iterate_address;
648 if (!error)
649 goto restart_from_beginning;
650
651 /* We've now had a failure to respond on all of a server's addresses -
652 * immediately probe them again and consider retrying the server.
653 */
654 trace_afs_rotate(op, afs_rotate_trace_probe_fileserver, 0);
655 afs_probe_fileserver(op->net, op->server);
656 if (op->flags & AFS_OPERATION_RETRY_SERVER) {
657 error = afs_wait_for_one_fs_probe(op->server, op->estate, op->addr_tried,
658 !(op->flags & AFS_OPERATION_UNINTR));
659 switch (error) {
660 case 1:
661 op->flags &= ~AFS_OPERATION_RETRY_SERVER;
662 trace_afs_rotate(op, afs_rotate_trace_retry_server, 1);
663 goto retry_server;
664 case 0:
665 trace_afs_rotate(op, afs_rotate_trace_retry_server, 0);
666 goto restart_from_beginning;
667 case -ERESTARTSYS:
668 afs_op_set_error(op, error);
669 goto failed;
670 case -ETIME:
671 case -EDESTADDRREQ:
672 goto next_server;
673 }
674 }
675
676 next_server:
677 trace_afs_rotate(op, afs_rotate_trace_next_server, 0);
678 _debug("next");
679 op->estate = NULL;
680 goto pick_server;
681
682 no_more_servers:
683 /* That's all the servers poked to no good effect. Try again if some
684 * of them were busy.
685 */
686 trace_afs_rotate(op, afs_rotate_trace_no_more_servers, 0);
687 if (op->flags & AFS_OPERATION_VBUSY) {
688 afs_sleep_and_retry(op);
689 op->flags &= ~AFS_OPERATION_VBUSY;
690 goto restart_from_beginning;
691 }
692
693 rcu_read_lock();
694 for (i = 0; i < op->server_list->nr_servers; i++) {
695 struct afs_endpoint_state *estate;
696
697 estate = op->server_states[i].endpoint_state;
698 error = READ_ONCE(estate->error);
699 if (error < 0)
700 afs_op_accumulate_error(op, error, estate->abort_code);
701 }
702 rcu_read_unlock();
703
704 failed:
705 trace_afs_rotate(op, afs_rotate_trace_failed, 0);
706 op->flags |= AFS_OPERATION_STOP;
707 op->estate = NULL;
708 _leave(" = f [failed %d]", afs_op_error(op));
709 return false;
710 }
regards,
dan carpenter
More information about the linux-afs
mailing list