[PATCH v3 08/10] dm: Add support for copy offload.
Mike Snitzer
snitzer at redhat.com
Tue Feb 22 08:00:31 PST 2022
On Mon, Feb 14 2022 at 2:59pm -0500,
Nitesh Shetty <nj.shetty at samsung.com> wrote:
> Before enabling copy for dm target, check if underlying devices and
> dm target support copy. Avoid split happening inside dm target.
> Fail early if the request needs split, currently splitting copy
> request is not supported.
>
> Signed-off-by: Nitesh Shetty <nj.shetty at samsung.com>
> ---
> drivers/md/dm-table.c | 45 +++++++++++++++++++++++++++++++++++
> drivers/md/dm.c | 6 +++++
> include/linux/device-mapper.h | 5 ++++
> 3 files changed, 56 insertions(+)
>
> diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
> index e43096cfe9e2..8dc9ae6a6a86 100644
> --- a/drivers/md/dm-table.c
> +++ b/drivers/md/dm-table.c
> @@ -1903,6 +1903,38 @@ static bool dm_table_supports_nowait(struct dm_table *t)
> return true;
> }
>
> +static int device_not_copy_capable(struct dm_target *ti, struct dm_dev *dev,
> + sector_t start, sector_t len, void *data)
> +{
> + struct request_queue *q = bdev_get_queue(dev->bdev);
> +
> + return !blk_queue_copy(q);
> +}
> +
> +static bool dm_table_supports_copy(struct dm_table *t)
> +{
> + struct dm_target *ti;
> + unsigned int i;
> +
> + for (i = 0; i < dm_table_get_num_targets(t); i++) {
> + ti = dm_table_get_target(t, i);
> +
> + if (!ti->copy_supported)
> + return false;
> +
> + /*
> + * target provides copy support (as implied by setting
> + * 'copy_supported') and it relies on _all_ data devices having copy support.
> + */
> + if (ti->copy_supported &&
> + (!ti->type->iterate_devices ||
> + ti->type->iterate_devices(ti, device_not_copy_capable, NULL)))
> + return false;
> + }
> +
> + return true;
> +}
> +
> static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
> sector_t start, sector_t len, void *data)
> {
> @@ -2000,6 +2032,19 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
> } else
> blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
>
> + if (!dm_table_supports_copy(t)) {
> + blk_queue_flag_clear(QUEUE_FLAG_COPY, q);
> + /* Must also clear discard limits... */
copy-and-paste mistake in the comment above: s/discard/copy/
> + q->limits.max_copy_sectors = 0;
> + q->limits.max_hw_copy_sectors = 0;
> + q->limits.max_copy_range_sectors = 0;
> + q->limits.max_hw_copy_range_sectors = 0;
> + q->limits.max_copy_nr_ranges = 0;
> + q->limits.max_hw_copy_nr_ranges = 0;
> + } else {
> + blk_queue_flag_set(QUEUE_FLAG_COPY, q);
> + }
> +
> if (dm_table_supports_secure_erase(t))
> blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
>
> diff --git a/drivers/md/dm.c b/drivers/md/dm.c
> index ab9cc91931f9..3b4cd49c489d 100644
> --- a/drivers/md/dm.c
> +++ b/drivers/md/dm.c
> @@ -1372,6 +1372,12 @@ static int __split_and_process_non_flush(struct clone_info *ci)
> if (__process_abnormal_io(ci, ti, &r))
> return r;
>
> + if ((unlikely(op_is_copy(ci->bio->bi_opf)) &&
> + max_io_len(ti, ci->sector) < ci->sector_count)) {
> + DMERR("%s: Error IO size(%u) is greater than maximum target size(%llu)\n",
> + __func__, ci->sector_count, max_io_len(ti, ci->sector));
> + return -EIO;
> + }
> len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
>
> r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
There isn't a need for the __func__ prefix here.
You'll also need to rebase on the latest dm-5.18 (or wait until the 5.18
merge window opens) because there have been some conflicting changes since
you posted.
> diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
> index b26fecf6c8e8..acfd4018125a 100644
> --- a/include/linux/device-mapper.h
> +++ b/include/linux/device-mapper.h
> @@ -362,6 +362,11 @@ struct dm_target {
> * zone append operations using regular writes.
> */
> bool emulate_zone_append:1;
> +
> + /*
> + * copy offload is supported
> + */
> + bool copy_supported:1;
> };
Would prefer this be "copy_offload_supported".
>
> void *dm_per_bio_data(struct bio *bio, size_t data_size);
> --
> 2.30.0-rc0
>
More information about the Linux-nvme
mailing list