[bug report] rxrpc: Fix locking in rxrpc's sendmsg

Dan Carpenter dan.carpenter at linaro.org
Tue Oct 1 00:35:12 PDT 2024


Hello David Howells,

Commit b0f571ecd794 ("rxrpc: Fix locking in rxrpc's sendmsg") from
Aug 24, 2022 (linux-next), leads to the following Smatch static
checker warning:

	net/rxrpc/sendmsg.c:408 rxrpc_send_data()
	error: uninitialized symbol 'txb'.
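
The pattern the checker is complaining about, boiled down to a standalone
toy program (this is not rxrpc code, just the shape of the problem with
made-up names): an error goto can reach the shared exit label before the
pointer has been assigned, so the store at "out:" writes an uninitialized
value.

        #include <stdio.h>

        struct buf { int len; };

        static struct buf *pending;

        static int send_data(int fail_early)
        {
                struct buf *b;          /* not initialized, like txb */
                int ret;

                ret = -1;
                if (fail_early)
                        goto out;       /* reaches "out" before b is assigned */

                b = pending;            /* the normal path assigns b here */
                pending = NULL;
                ret = 0;
        out:
                pending = b;            /* with fail_early set, this stores garbage */
                return ret;
        }

        int main(void)
        {
                return send_data(1) ? 1 : 0;
        }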

net/rxrpc/sendmsg.c
    277 static int rxrpc_send_data(struct rxrpc_sock *rx,
    278                            struct rxrpc_call *call,
    279                            struct msghdr *msg, size_t len,
    280                            rxrpc_notify_end_tx_t notify_end_tx,
    281                            bool *_dropped_lock)
    282 {
    283         struct rxrpc_txbuf *txb;
    284         struct sock *sk = &rx->sk;
    285         enum rxrpc_call_state state;
    286         long timeo;
    287         bool more = msg->msg_flags & MSG_MORE;
    288         int ret, copied = 0;
    289 
    290         timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
    291 
    292         ret = rxrpc_wait_to_be_connected(call, &timeo);
    293         if (ret < 0)
    294                 return ret;
    295 
    296         if (call->conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
    297                 ret = rxrpc_init_client_conn_security(call->conn);
    298                 if (ret < 0)
    299                         return ret;
    300         }
    301 
    302         /* this should be in poll */
    303         sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
    304 
    305 reload:
    306         ret = -EPIPE;
    307         if (sk->sk_shutdown & SEND_SHUTDOWN)
    308                 goto maybe_error;

txb isn't initialized yet if we hit this goto on the first iteration
(i.e. before we have come back around via the "goto reload" at the end of
the function).

    309         state = rxrpc_call_state(call);
    310         ret = -ESHUTDOWN;
    311         if (state >= RXRPC_CALL_COMPLETE)
    312                 goto maybe_error;

There are several "goto maybe_error" statements before txb is set.

    313         ret = -EPROTO;
    314         if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
    315             state != RXRPC_CALL_SERVER_ACK_REQUEST &&
    316             state != RXRPC_CALL_SERVER_SEND_REPLY) {
    317                 /* Request phase complete for this client call */
    318                 trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send,
    319                                   call->cid, call->call_id, call->rx_consumed,
    320                                   0, -EPROTO);
    321                 goto maybe_error;
    322         }
    323 
    324         ret = -EMSGSIZE;
    325         if (call->tx_total_len != -1) {
    326                 if (len - copied > call->tx_total_len)
    327                         goto maybe_error;
    328                 if (!more && len - copied != call->tx_total_len)
    329                         goto maybe_error;
    330         }
    331 
    332         txb = call->tx_pending;


txb is only initialized here.

    333         call->tx_pending = NULL;
    334         if (txb)
    335                 rxrpc_see_txbuf(txb, rxrpc_txbuf_see_send_more);
    336 
    337         do {
    338                 if (!txb) {
    339                         size_t remain;
    340 
    341                         _debug("alloc");
    342 
    343                         if (!rxrpc_check_tx_space(call, NULL))
    344                                 goto wait_for_space;
    345 
    346                         /* Work out the maximum size of a packet.  Assume that
    347                          * the security header is going to be in the padded
    348                          * region (enc blocksize), but the trailer is not.
    349                          */
    350                         remain = more ? INT_MAX : msg_data_left(msg);
    351                         txb = call->conn->security->alloc_txbuf(call, remain, sk->sk_allocation);
    352                         if (!txb) {
    353                                 ret = -ENOMEM;
    354                                 goto maybe_error;
    355                         }
    356                 }
    357 
    358                 _debug("append");
    359 
    360                 /* append next segment of data to the current buffer */
    361                 if (msg_data_left(msg) > 0) {
    362                         size_t copy = min_t(size_t, txb->space, msg_data_left(msg));
    363 
    364                         _debug("add %zu", copy);
    365                         if (!copy_from_iter_full(txb->kvec[0].iov_base + txb->offset,
    366                                                  copy, &msg->msg_iter))
    367                                 goto efault;
    368                         _debug("added");
    369                         txb->space -= copy;
    370                         txb->len += copy;
    371                         txb->offset += copy;
    372                         copied += copy;
    373                         if (call->tx_total_len != -1)
    374                                 call->tx_total_len -= copy;
    375                 }
    376 
    377                 /* check for the far side aborting the call or a network error
    378                  * occurring */
    379                 if (rxrpc_call_is_complete(call))
    380                         goto call_terminated;
    381 
    382                 /* add the packet to the send queue if it's now full */
    383                 if (!txb->space ||
    384                     (msg_data_left(msg) == 0 && !more)) {
    385                         if (msg_data_left(msg) == 0 && !more)
    386                                 txb->flags |= RXRPC_LAST_PACKET;
    387                         else if (call->tx_top - call->acks_hard_ack <
    388                                  call->tx_winsize)
    389                                 txb->flags |= RXRPC_MORE_PACKETS;
    390 
    391                         ret = call->security->secure_packet(call, txb);
    392                         if (ret < 0)
    393                                 goto out;
    394 
    395                         txb->kvec[0].iov_len += txb->len;
    396                         txb->len = txb->kvec[0].iov_len;
    397                         rxrpc_queue_packet(rx, call, txb, notify_end_tx);
    398                         txb = NULL;
    399                 }
    400         } while (msg_data_left(msg) > 0);
    401 
    402 success:
    403         ret = copied;
    404         if (rxrpc_call_is_complete(call) &&
    405             call->error < 0)
    406                 ret = call->error;
    407 out:
--> 408         call->tx_pending = txb;
                                   ^^^

    409         _leave(" = %d", ret);
    410         return ret;
    411 
    412 call_terminated:
    413         rxrpc_put_txbuf(txb, rxrpc_txbuf_put_send_aborted);
    414         _leave(" = %d", call->error);
    415         return call->error;
    416 
    417 maybe_error:
    418         if (copied)

copied is zero on the first iteration, so we don't goto success

    419                 goto success;
    420         goto out;

so we goto out and store the uninitialized txb in call->tx_pending on line 408

    421 
    422 efault:
    423         ret = -EFAULT;
    424         goto out;
    425 
    426 wait_for_space:
    427         ret = -EAGAIN;
    428         if (msg->msg_flags & MSG_DONTWAIT)
    429                 goto maybe_error;
    430         mutex_unlock(&call->user_mutex);
    431         *_dropped_lock = true;
    432         ret = rxrpc_wait_for_tx_window(rx, call, &timeo,
    433                                        msg->msg_flags & MSG_WAITALL);
    434         if (ret < 0)
    435                 goto maybe_error;
    436         if (call->interruptibility == RXRPC_INTERRUPTIBLE) {
    437                 if (mutex_lock_interruptible(&call->user_mutex) < 0) {
    438                         ret = sock_intr_errno(timeo);
    439                         goto maybe_error;
    440                 }
    441         } else {
    442                 mutex_lock(&call->user_mutex);
    443         }
    444         *_dropped_lock = false;
    445         goto reload;
    446 }
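
For what it's worth, one way to make txb always defined at the store on
line 408 might be to pick up call->tx_pending right after the reload
label, before any of the checks that can jump to maybe_error.  This is
only an untested sketch to illustrate the ordering, not a proposed
patch, and it assumes nothing depends on the pending buffer being read
only after those checks:

        reload:
                /* Pick up any pending buffer before the checks below so that
                 * txb is defined on every path that can reach the out label.
                 */
                txb = call->tx_pending;
                call->tx_pending = NULL;
                if (txb)
                        rxrpc_see_txbuf(txb, rxrpc_txbuf_see_send_more);

                ret = -EPIPE;
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        goto maybe_error;
                ...

Alternatively, initializing txb to NULL at its declaration would silence
the warning, but then an early "goto maybe_error" would overwrite a
still-pending buffer in call->tx_pending with NULL, which looks like it
would leak it, so the reordering (or a dedicated error label for the
early checks) seems closer to the intent.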

regards,
dan carpenter


