[PATCH v2 03/10] block: Add copy offload support infrastructure
kernel test robot
lkp at intel.com
Mon Feb 7 15:26:02 PST 2022
Hi Nitesh,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on axboe-block/for-next]
[also build test WARNING on next-20220207]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url: https://github.com/0day-ci/linux/commits/Nitesh-Shetty/block-make-bio_map_kern-non-static/20220207-231407
base: https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
config: hexagon-randconfig-r045-20220207 (https://download.01.org/0day-ci/archive/20220208/202202080735.lyaEe5Bq-lkp@intel.com/config)
compiler: clang version 15.0.0 (https://github.com/llvm/llvm-project 0d8850ae2cae85d49bea6ae0799fa41c7202c05c)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/12a9801a7301f1a1e2ea355c5a4438dab17894cf
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Nitesh-Shetty/block-make-bio_map_kern-non-static/20220207-231407
        git checkout 12a9801a7301f1a1e2ea355c5a4438dab17894cf
        # save the config file to the linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=hexagon SHELL=/bin/bash
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp at intel.com>
All warnings (new ones prefixed by >>):
>> block/blk-lib.c:185:5: warning: no previous prototype for function 'blk_copy_offload' [-Wmissing-prototypes]
int blk_copy_offload(struct block_device *src_bdev, int nr_srcs,
^
block/blk-lib.c:185:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
int blk_copy_offload(struct block_device *src_bdev, int nr_srcs,
^
static
1 warning generated.
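The usual ways to address -Wmissing-prototypes are either to declare the function in a header that block/blk-lib.c includes, or to mark it static if it is only called from within that file. A minimal sketch, with the signature taken from the quoted code; the header placement is an assumption, not verified against this series:

	/* Option 1: declare the prototype in a header included by blk-lib.c,
	 * e.g. include/linux/blkdev.h (hypothetical placement -- wherever the
	 * rest of this series keeps its declarations).
	 */
	int blk_copy_offload(struct block_device *src_bdev, int nr_srcs,
			struct range_entry *rlist, struct block_device *dst_bdev,
			gfp_t gfp_mask);

	/* Option 2: if the function is only used inside block/blk-lib.c,
	 * follow the compiler's suggestion and mark it static.
	 */
	static int blk_copy_offload(struct block_device *src_bdev, int nr_srcs,
			struct range_entry *rlist, struct block_device *dst_bdev,
			gfp_t gfp_mask)
	{
		...
	}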
vim +/blk_copy_offload +185 block/blk-lib.c
   180	
   181	/*
   182	 * blk_copy_offload - Use device's native copy offload feature
   183	 * Go through the user-provided payload and prepare a new payload based on the device's copy offload limits.
   184	 */
 > 185	int blk_copy_offload(struct block_device *src_bdev, int nr_srcs,
   186			struct range_entry *rlist, struct block_device *dst_bdev, gfp_t gfp_mask)
   187	{
   188		struct request_queue *sq = bdev_get_queue(src_bdev);
   189		struct request_queue *dq = bdev_get_queue(dst_bdev);
   190		struct bio *read_bio, *write_bio;
   191		struct copy_ctx *ctx;
   192		struct cio *cio;
   193		struct page *token;
   194		sector_t src_blk, copy_len, dst_blk;
   195		sector_t remaining, max_copy_len = LONG_MAX;
   196		int ri = 0, ret = 0;
   197	
   198		cio = kzalloc(sizeof(struct cio), GFP_KERNEL);
   199		if (!cio)
   200			return -ENOMEM;
   201		atomic_set(&cio->refcount, 0);
   202		cio->rlist = rlist;
   203	
   204		max_copy_len = min3(max_copy_len, (sector_t)sq->limits.max_copy_sectors,
   205				(sector_t)dq->limits.max_copy_sectors);
   206		max_copy_len = min3(max_copy_len, (sector_t)sq->limits.max_copy_range_sectors,
   207				(sector_t)dq->limits.max_copy_range_sectors) << SECTOR_SHIFT;
   208	
   209		for (ri = 0; ri < nr_srcs; ri++) {
   210			cio->rlist[ri].comp_len = rlist[ri].len;
   211			for (remaining = rlist[ri].len, src_blk = rlist[ri].src, dst_blk = rlist[ri].dst;
   212				remaining > 0;
   213				remaining -= copy_len, src_blk += copy_len, dst_blk += copy_len) {
   214				copy_len = min(remaining, max_copy_len);
   215	
   216				token = alloc_page(gfp_mask);
   217				if (unlikely(!token)) {
   218					ret = -ENOMEM;
   219					goto err_token;
   220				}
   221	
   222				read_bio = bio_alloc(src_bdev, 1, REQ_OP_READ | REQ_COPY | REQ_NOMERGE,
   223					gfp_mask);
   224				if (!read_bio) {
   225					ret = -ENOMEM;
   226					goto err_read_bio;
   227				}
   228				read_bio->bi_iter.bi_sector = src_blk >> SECTOR_SHIFT;
   229				read_bio->bi_iter.bi_size = copy_len;
   230				__bio_add_page(read_bio, token, PAGE_SIZE, 0);
   231				ret = submit_bio_wait(read_bio);
   232				if (ret) {
   233					bio_put(read_bio);
   234					goto err_read_bio;
   235				}
   236				bio_put(read_bio);
   237				ctx = kzalloc(sizeof(struct copy_ctx), gfp_mask);
   238				if (!ctx) {
   239					ret = -ENOMEM;
   240					goto err_read_bio;
   241				}
   242				ctx->cio = cio;
   243				ctx->range_idx = ri;
   244				ctx->start_sec = rlist[ri].src;
   245	
   246				write_bio = bio_alloc(dst_bdev, 1, REQ_OP_WRITE | REQ_COPY | REQ_NOMERGE,
   247					gfp_mask);
   248				if (!write_bio) {
   249					ret = -ENOMEM;
   250				goto err_read_bio;
   251				}
   252	
   253				write_bio->bi_iter.bi_sector = dst_blk >> SECTOR_SHIFT;
   254				write_bio->bi_iter.bi_size = copy_len;
   255				__bio_add_page(write_bio, token, PAGE_SIZE, 0);
   256				write_bio->bi_end_io = bio_copy_end_io;
   257				write_bio->bi_private = ctx;
   258				atomic_inc(&cio->refcount);
   259				submit_bio(write_bio);
   260			}
   261		}
   262	
   263		/* Wait for completion of all I/Os */
   264		return cio_await_completion(cio);
   265	
   266	err_read_bio:
   267		__free_page(token);
   268	err_token:
   269		rlist[ri].comp_len = min_t(sector_t, rlist[ri].comp_len, (rlist[ri].len - remaining));
   270	
   271		cio->io_err = ret;
   272		return cio_await_completion(cio);
   273	}
   274	
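For context, a rough sketch of how a caller might drive blk_copy_offload() as quoted above, assuming the struct range_entry fields (src, dst, len, comp_len) and the byte-granularity offsets/lengths implied by the code (it shifts src/dst by SECTOR_SHIFT and assigns len to bi_size); the variable names are illustrative only, not part of the patch:

	/* Illustrative caller: copy one extent from src_bdev to dst_bdev. */
	struct range_entry range = {
		.src = src_off,		/* source offset in bytes */
		.dst = dst_off,		/* destination offset in bytes */
		.len = nr_bytes,	/* length to copy in bytes */
	};
	int ret;

	ret = blk_copy_offload(src_bdev, 1, &range, dst_bdev, GFP_KERNEL);
	if (ret)
		pr_err("copy offload failed: %d, completed %llu bytes\n",
		       ret, (unsigned long long)range.comp_len);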
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org