[PATCH v5 2/7] staging: vchiq_core: Simplify vchiq_bulk_transfer()
Stefan Wahren
wahrenst@gmx.net
Mon Sep 9 11:06:43 PDT 2024
Hi Umang,
On 09.09.24 at 08:14, Umang Jain wrote:
> Factor the core logic for preparing a bulk data transfer (mutex locking,
> waiting on the vchiq_bulk_queue wait queue, initialising the bulk
> transfer) out of vchiq_bulk_transfer(). This simplifies the existing
> vchiq_bulk_transfer() and makes it more readable, since all the core
> logic is handled in vchiq_bulk_xfer_queue_msg_interruptible(). It will
> also make it easier to refactor vchiq_bulk_transfer() for the different
> vchiq bulk transfer modes.
>
> No functional changes intended in this patch.
>
> Signed-off-by: Umang Jain <umang.jain@ideasonboard.com>
> ---
> .../interface/vchiq_arm/vchiq_core.c | 217 ++++++++++--------
> 1 file changed, 121 insertions(+), 96 deletions(-)
>
> diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
> index 2239f59519be..f36044bab194 100644
> --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
> +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
> @@ -189,6 +189,13 @@ static const char *const conn_state_names[] = {
> static void
> release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
>
> +static int
> +vchiq_bulk_xfer_queue_msg_interruptible(struct vchiq_service *service,
> + void *offset, void __user *uoffset,
> + int size, void *userdata,
> + enum vchiq_bulk_mode mode,
> + enum vchiq_bulk_dir dir);
Please don't add a forward declaration of this function; it makes
maintenance harder.
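Defining the helper directly above its only caller makes the forward
declaration unnecessary. A minimal sketch of what I mean (the helper body
stays exactly as added by this patch, only its placement in the file
changes; parameter lists abbreviated from your patch):

static int
vchiq_bulk_xfer_queue_msg_interruptible(struct vchiq_service *service,
					void *offset, void __user *uoffset,
					int size, void *userdata,
					enum vchiq_bulk_mode mode,
					enum vchiq_bulk_dir dir)
{
	/* ... body as introduced further down in this patch ... */
}

int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
			void *offset, void __user *uoffset, int size,
			void *userdata, enum vchiq_bulk_mode mode,
			enum vchiq_bulk_dir dir)
{
	...
	status = vchiq_bulk_xfer_queue_msg_interruptible(service, offset, uoffset,
							 size, userdata, mode, dir);
	...
}

That way the signature lives in exactly one place, and a later change to it
only needs to touch the definition.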
Apart from this, I'm fine with this series.
Best regards
> +
> static const char *msg_type_str(unsigned int msg_type)
> {
> switch (msg_type) {
> @@ -2991,15 +2998,9 @@ int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
> enum vchiq_bulk_mode mode, enum vchiq_bulk_dir dir)
> {
> struct vchiq_service *service = find_service_by_handle(instance, handle);
> - struct vchiq_bulk_queue *queue;
> - struct vchiq_bulk *bulk;
> - struct vchiq_state *state;
> struct bulk_waiter *bulk_waiter = NULL;
> - const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
> - const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
> - VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
> + struct vchiq_bulk *bulk;
> int status = -EINVAL;
> - int payload[2];
>
> if (!service)
> goto error_exit;
> @@ -3027,89 +3028,10 @@ int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
> goto error_exit;
> }
>
> - state = service->state;
> -
> - queue = (dir == VCHIQ_BULK_TRANSMIT) ?
> - &service->bulk_tx : &service->bulk_rx;
> -
> - if (mutex_lock_killable(&service->bulk_mutex)) {
> - status = -EAGAIN;
> - goto error_exit;
> - }
> -
> - if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
> - VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
> - do {
> - mutex_unlock(&service->bulk_mutex);
> - if (wait_for_completion_interruptible(&service->bulk_remove_event)) {
> - status = -EAGAIN;
> - goto error_exit;
> - }
> - if (mutex_lock_killable(&service->bulk_mutex)) {
> - status = -EAGAIN;
> - goto error_exit;
> - }
> - } while (queue->local_insert == queue->remove +
> - VCHIQ_NUM_SERVICE_BULKS);
> - }
> -
> - bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
> -
> - bulk->mode = mode;
> - bulk->dir = dir;
> - bulk->userdata = userdata;
> - bulk->size = size;
> - bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
> -
> - if (vchiq_prepare_bulk_data(instance, bulk, offset, uoffset, size, dir))
> - goto unlock_error_exit;
> -
> - /*
> - * Ensure that the bulk data record is visible to the peer
> - * before proceeding.
> - */
> - wmb();
> -
> - dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n",
> - state->id, service->localport, service->remoteport,
> - dir_char, size, &bulk->data, userdata);
> -
> - /*
> - * The slot mutex must be held when the service is being closed, so
> - * claim it here to ensure that isn't happening
> - */
> - if (mutex_lock_killable(&state->slot_mutex)) {
> - status = -EAGAIN;
> - goto cancel_bulk_error_exit;
> - }
> -
> - if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
> - goto unlock_both_error_exit;
> -
> - payload[0] = lower_32_bits(bulk->data);
> - payload[1] = bulk->size;
> - status = queue_message(state,
> - NULL,
> - VCHIQ_MAKE_MSG(dir_msgtype,
> - service->localport,
> - service->remoteport),
> - memcpy_copy_callback,
> - &payload,
> - sizeof(payload),
> - QMFLAGS_IS_BLOCKING |
> - QMFLAGS_NO_MUTEX_LOCK |
> - QMFLAGS_NO_MUTEX_UNLOCK);
> + status = vchiq_bulk_xfer_queue_msg_interruptible(service, offset, uoffset,
> + size, userdata, mode, dir);
> if (status)
> - goto unlock_both_error_exit;
> -
> - queue->local_insert++;
> -
> - mutex_unlock(&state->slot_mutex);
> - mutex_unlock(&service->bulk_mutex);
> -
> - dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n",
> - state->id, service->localport, dir_char, queue->local_insert,
> - queue->remote_insert, queue->process);
> + goto error_exit;
>
> vchiq_service_put(service);
>
> @@ -3123,13 +3045,6 @@ int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
>
> return 0;
>
> -unlock_both_error_exit:
> - mutex_unlock(&state->slot_mutex);
> -cancel_bulk_error_exit:
> - vchiq_complete_bulk(service->instance, bulk);
> -unlock_error_exit:
> - mutex_unlock(&service->bulk_mutex);
> -
> error_exit:
> if (service)
> vchiq_service_put(service);
> @@ -3289,6 +3204,116 @@ vchiq_release_message(struct vchiq_instance *instance, unsigned int handle,
> }
> EXPORT_SYMBOL(vchiq_release_message);
>
> +/*
> + * Prepares a bulk transfer to be queued. The function is interruptible and is
> + * intended to be called from user threads. It may return -EAGAIN to indicate
> + * that a signal has been received and the call should be retried after being
> + * returned to user context.
> + */
> +static int
> +vchiq_bulk_xfer_queue_msg_interruptible(struct vchiq_service *service,
> + void *offset, void __user *uoffset,
> + int size, void *userdata,
> + enum vchiq_bulk_mode mode,
> + enum vchiq_bulk_dir dir)
> +{
> + struct vchiq_bulk_queue *queue;
> + struct vchiq_bulk *bulk;
> + struct vchiq_state *state = service->state;
> + const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
> + const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
> + VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
> + int status = -EINVAL;
> + int payload[2];
> +
> + queue = (dir == VCHIQ_BULK_TRANSMIT) ?
> + &service->bulk_tx : &service->bulk_rx;
> +
> + if (mutex_lock_killable(&service->bulk_mutex))
> + return -EAGAIN;
> +
> + if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
> + VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
> + do {
> + mutex_unlock(&service->bulk_mutex);
> + if (wait_for_completion_interruptible(&service->bulk_remove_event))
> + return -EAGAIN;
> + if (mutex_lock_killable(&service->bulk_mutex))
> + return -EAGAIN;
> + } while (queue->local_insert == queue->remove +
> + VCHIQ_NUM_SERVICE_BULKS);
> + }
> +
> + bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
> +
> + bulk->mode = mode;
> + bulk->dir = dir;
> + bulk->userdata = userdata;
> + bulk->size = size;
> + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
> +
> + if (vchiq_prepare_bulk_data(service->instance, bulk, offset, uoffset, size, dir))
> + goto unlock_error_exit;
> +
> + /*
> + * Ensure that the bulk data record is visible to the peer
> + * before proceeding.
> + */
> + wmb();
> +
> + dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n",
> + state->id, service->localport, service->remoteport,
> + dir_char, size, &bulk->data, userdata);
> +
> + /*
> + * The slot mutex must be held when the service is being closed, so
> + * claim it here to ensure that isn't happening
> + */
> + if (mutex_lock_killable(&state->slot_mutex)) {
> + status = -EAGAIN;
> + goto cancel_bulk_error_exit;
> + }
> +
> + if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
> + goto unlock_both_error_exit;
> +
> + payload[0] = lower_32_bits(bulk->data);
> + payload[1] = bulk->size;
> + status = queue_message(state,
> + NULL,
> + VCHIQ_MAKE_MSG(dir_msgtype,
> + service->localport,
> + service->remoteport),
> + memcpy_copy_callback,
> + &payload,
> + sizeof(payload),
> + QMFLAGS_IS_BLOCKING |
> + QMFLAGS_NO_MUTEX_LOCK |
> + QMFLAGS_NO_MUTEX_UNLOCK);
> + if (status)
> + goto unlock_both_error_exit;
> +
> + queue->local_insert++;
> +
> + mutex_unlock(&state->slot_mutex);
> + mutex_unlock(&service->bulk_mutex);
> +
> + dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n",
> + state->id, service->localport, dir_char, queue->local_insert,
> + queue->remote_insert, queue->process);
> +
> + return status;
> +
> +unlock_both_error_exit:
> + mutex_unlock(&state->slot_mutex);
> +cancel_bulk_error_exit:
> + vchiq_complete_bulk(service->instance, bulk);
> +unlock_error_exit:
> + mutex_unlock(&service->bulk_mutex);
> +
> + return status;
> +}
> +
> static void
> release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
> {