[xilinx-xlnx:xlnx_rebase_v5.10 140/1981] drivers/dma/xilinx/axidmatest.c:339:54: warning: implicit conversion from 'enum dma_transfer_direction' to 'enum dma_data_direction'

kernel test robot <lkp@intel.com>
Fri Dec 17 15:06:01 PST 2021


tree:   https://github.com/Xilinx/linux-xlnx xlnx_rebase_v5.10
head:   87ec9a2d98a7a7dfc98b57348a0ec310fd170e4b
commit: 901cbedb0639634b09a593eebdea3a1d5f3e2767 [140/1981] dmaengine: xilinx: Add axidmatest test client code
config: arm-allyesconfig (https://download.01.org/0day-ci/archive/20211218/202112180624.lLBY2qeG-lkp@intel.com/config)
compiler: arm-linux-gnueabi-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/Xilinx/linux-xlnx/commit/901cbedb0639634b09a593eebdea3a1d5f3e2767
        git remote add xilinx-xlnx https://github.com/Xilinx/linux-xlnx
        git fetch --no-tags xilinx-xlnx xlnx_rebase_v5.10
        git checkout 901cbedb0639634b09a593eebdea3a1d5f3e2767
        # save the config file to the linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross O=build_dir ARCH=arm SHELL=/bin/bash drivers/dma/xilinx/ drivers/hsi/clients/ drivers/mtd/nand/raw/ drivers/pinctrl/aspeed/ drivers/remoteproc/

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   drivers/dma/xilinx/axidmatest.c: In function 'dmatest_slave_func':
   drivers/dma/xilinx/axidmatest.c:294:17: warning: ISO C90 forbids variable length array 'dma_srcs' [-Wvla]
     294 |                 dma_addr_t dma_srcs[src_cnt];
         |                 ^~~~~~~~~~
   drivers/dma/xilinx/axidmatest.c:295:17: warning: ISO C90 forbids variable length array 'dma_dsts' [-Wvla]
     295 |                 dma_addr_t dma_dsts[dst_cnt];
         |                 ^~~~~~~~~~
   drivers/dma/xilinx/axidmatest.c:302:24: warning: ISO C90 forbids variable length array 'tx_sg' [-Wvla]
     302 |                 struct scatterlist tx_sg[bd_cnt];
         |                        ^~~~~~~~~~~
   drivers/dma/xilinx/axidmatest.c:303:24: warning: ISO C90 forbids variable length array 'rx_sg' [-Wvla]
     303 |                 struct scatterlist rx_sg[bd_cnt];
         |                        ^~~~~~~~~~~
   In file included from include/linux/dma/xilinx_dma.h:11,
                    from drivers/dma/xilinx/axidmatest.c:24:
>> drivers/dma/xilinx/axidmatest.c:339:54: warning: implicit conversion from 'enum dma_transfer_direction' to 'enum dma_data_direction' [-Wenum-conversion]
     339 |                                                      DMA_MEM_TO_DEV);
         |                                                      ^~~~~~~~~~~~~~
   include/linux/dma-mapping.h:383:66: note: in definition of macro 'dma_map_single'
     383 | #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
         |                                                                  ^
   drivers/dma/xilinx/axidmatest.c:369:50: warning: implicit conversion from 'enum dma_transfer_direction' to 'enum dma_data_direction' [-Wenum-conversion]
     369 |                                                  DMA_MEM_TO_DEV);
         |                                                  ^~~~~~~~~~~~~~
   include/linux/dma-mapping.h:384:70: note: in definition of macro 'dma_unmap_single'
     384 | #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
         |                                                                      ^
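
The two -Wenum-conversion warnings come from passing DMA_MEM_TO_DEV, an
enum dma_transfer_direction value meant for the dmaengine slave API, to
dma_map_single()/dma_unmap_single(), which take an enum dma_data_direction
(DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL). A minimal, untested
sketch of one way to make the types agree for the TX buffers (the device
only reads them, so DMA_TO_DEVICE keeps the existing mapping length and
data flow):

	/* Sketch only: use the streaming-API direction enum for the
	 * map/unmap calls; DMA_MEM_TO_DEV stays in the
	 * device_prep_slave_sg() calls, which really do take an
	 * enum dma_transfer_direction.
	 */
	dma_srcs[i] = dma_map_single(tx_dev->dev, buf, len,
				     DMA_TO_DEVICE);

	dma_unmap_single(tx_dev->dev, dma_srcs[i], len,
			 DMA_TO_DEVICE);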


vim +339 drivers/dma/xilinx/axidmatest.c

   223	
   224	/* Function for slave transfers
   225	 * Each thread requires 2 channels, one for transmit, and one for receive
   226	 */
   227	static int dmatest_slave_func(void *data)
   228	{
   229		struct dmatest_slave_thread	*thread = data;
   230		struct dma_chan *tx_chan;
   231		struct dma_chan *rx_chan;
   232		const char *thread_name;
   233		unsigned int src_off, dst_off, len;
   234		unsigned int error_count;
   235		unsigned int failed_tests = 0;
   236		unsigned int total_tests = 0;
   237		dma_cookie_t tx_cookie;
   238		dma_cookie_t rx_cookie;
   239		enum dma_status status;
   240		enum dma_ctrl_flags flags;
   241		int ret;
   242		int src_cnt;
   243		int dst_cnt;
   244		int bd_cnt = 11;
   245		int i;
   246	
   247		ktime_t	ktime, start, diff;
   248		ktime_t	filltime = 0;
   249		ktime_t	comparetime = 0;
   250		s64 runtime = 0;
   251		unsigned long long total_len = 0;
   252		thread_name = current->comm;
   253		ret = -ENOMEM;
   254	
   255	
   256		/* Ensure that all previous reads are complete */
   257		smp_rmb();
   258		tx_chan = thread->tx_chan;
   259		rx_chan = thread->rx_chan;
   260		dst_cnt = bd_cnt;
   261		src_cnt = bd_cnt;
   262	
   263		thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
   264		if (!thread->srcs)
   265			goto err_srcs;
   266		for (i = 0; i < src_cnt; i++) {
   267			thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
   268			if (!thread->srcs[i])
   269				goto err_srcbuf;
   270		}
   271		thread->srcs[i] = NULL;
   272	
   273		thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
   274		if (!thread->dsts)
   275			goto err_dsts;
   276		for (i = 0; i < dst_cnt; i++) {
   277			thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
   278			if (!thread->dsts[i])
   279				goto err_dstbuf;
   280		}
   281		thread->dsts[i] = NULL;
   282	
   283		set_user_nice(current, 10);
   284	
   285		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
   286	
   287		ktime = ktime_get();
   288		while (!kthread_should_stop() &&
   289		       !(iterations && total_tests >= iterations)) {
   290			struct dma_device *tx_dev = tx_chan->device;
   291			struct dma_device *rx_dev = rx_chan->device;
   292			struct dma_async_tx_descriptor *txd = NULL;
   293			struct dma_async_tx_descriptor *rxd = NULL;
   294			dma_addr_t dma_srcs[src_cnt];
   295			dma_addr_t dma_dsts[dst_cnt];
   296			struct completion rx_cmp;
   297			struct completion tx_cmp;
   298			unsigned long rx_tmo =
   299					msecs_to_jiffies(300000); /* RX takes longer */
   300			unsigned long tx_tmo = msecs_to_jiffies(30000);
   301			u8 align = 0;
 > 302			struct scatterlist tx_sg[bd_cnt];
   303			struct scatterlist rx_sg[bd_cnt];
   304	
   305			total_tests++;
   306	
   307			/* honor larger alignment restrictions */
   308			align = tx_dev->copy_align;
   309			if (rx_dev->copy_align > align)
   310				align = rx_dev->copy_align;
   311	
   312			if (1 << align > test_buf_size) {
   313				pr_err("%u-byte buffer too small for %d-byte alignment\n",
   314				       test_buf_size, 1 << align);
   315				break;
   316			}
   317	
   318			len = dmatest_random() % test_buf_size + 1;
   319			len = (len >> align) << align;
   320			if (!len)
   321				len = 1 << align;
   322			total_len += len;
   323			src_off = dmatest_random() % (test_buf_size - len + 1);
   324			dst_off = dmatest_random() % (test_buf_size - len + 1);
   325	
   326			src_off = (src_off >> align) << align;
   327			dst_off = (dst_off >> align) << align;
   328	
   329			start = ktime_get();
   330			dmatest_init_srcs(thread->srcs, src_off, len);
   331			dmatest_init_dsts(thread->dsts, dst_off, len);
   332			diff = ktime_sub(ktime_get(), start);
   333			filltime = ktime_add(filltime, diff);
   334	
   335			for (i = 0; i < src_cnt; i++) {
   336				u8 *buf = thread->srcs[i] + src_off;
   337	
   338				dma_srcs[i] = dma_map_single(tx_dev->dev, buf, len,
 > 339							     DMA_MEM_TO_DEV);
   340			}
   341	
   342			for (i = 0; i < dst_cnt; i++) {
   343				dma_dsts[i] = dma_map_single(rx_dev->dev,
   344							     thread->dsts[i],
   345							     test_buf_size,
   346							     DMA_BIDIRECTIONAL);
   347			}
   348	
   349			sg_init_table(tx_sg, bd_cnt);
   350			sg_init_table(rx_sg, bd_cnt);
   351	
   352			for (i = 0; i < bd_cnt; i++) {
   353				sg_dma_address(&tx_sg[i]) = dma_srcs[i];
   354				sg_dma_address(&rx_sg[i]) = dma_dsts[i] + dst_off;
   355	
   356				sg_dma_len(&tx_sg[i]) = len;
   357				sg_dma_len(&rx_sg[i]) = len;
   358			}
   359	
   360			rxd = rx_dev->device_prep_slave_sg(rx_chan, rx_sg, bd_cnt,
   361					DMA_DEV_TO_MEM, flags, NULL);
   362	
   363			txd = tx_dev->device_prep_slave_sg(tx_chan, tx_sg, bd_cnt,
   364					DMA_MEM_TO_DEV, flags, NULL);
   365	
   366			if (!rxd || !txd) {
   367				for (i = 0; i < src_cnt; i++)
   368					dma_unmap_single(tx_dev->dev, dma_srcs[i], len,
   369							 DMA_MEM_TO_DEV);
   370				for (i = 0; i < dst_cnt; i++)
   371					dma_unmap_single(rx_dev->dev, dma_dsts[i],
   372							 test_buf_size,
   373							 DMA_BIDIRECTIONAL);
   374				pr_warn("%s: #%u: prep error with src_off=0x%x ",
   375					thread_name, total_tests - 1, src_off);
   376				pr_warn("dst_off=0x%x len=0x%x\n",
   377					dst_off, len);
   378				msleep(100);
   379				failed_tests++;
   380				continue;
   381			}
   382	
   383			init_completion(&rx_cmp);
   384			rxd->callback = dmatest_slave_rx_callback;
   385			rxd->callback_param = &rx_cmp;
   386			rx_cookie = rxd->tx_submit(rxd);
   387	
   388			init_completion(&tx_cmp);
   389			txd->callback = dmatest_slave_tx_callback;
   390			txd->callback_param = &tx_cmp;
   391			tx_cookie = txd->tx_submit(txd);
   392	
   393			if (dma_submit_error(rx_cookie) ||
   394			    dma_submit_error(tx_cookie)) {
   395				pr_warn("%s: #%u: submit error %d/%d with src_off=0x%x ",
   396					thread_name, total_tests - 1,
   397					rx_cookie, tx_cookie, src_off);
   398				pr_warn("dst_off=0x%x len=0x%x\n",
   399					dst_off, len);
   400				msleep(100);
   401				failed_tests++;
   402				continue;
   403			}
   404			dma_async_issue_pending(tx_chan);
   405			dma_async_issue_pending(rx_chan);
   406	
   407			tx_tmo = wait_for_completion_timeout(&tx_cmp, tx_tmo);
   408	
   409			status = dma_async_is_tx_complete(tx_chan, tx_cookie,
   410							  NULL, NULL);
   411	
   412			if (tx_tmo == 0) {
   413				pr_warn("%s: #%u: tx test timed out\n",
   414					thread_name, total_tests - 1);
   415				failed_tests++;
   416				continue;
   417			} else if (status != DMA_COMPLETE) {
   418				pr_warn("%s: #%u: tx got completion callback, ",
   419					thread_name, total_tests - 1);
   420				pr_warn("but status is \'%s\'\n",
   421					status == DMA_ERROR ? "error" :
   422					"in progress");
   423				failed_tests++;
   424				continue;
   425			}
   426	
   427			rx_tmo = wait_for_completion_timeout(&rx_cmp, rx_tmo);
   428			status = dma_async_is_tx_complete(rx_chan, rx_cookie,
   429							  NULL, NULL);
   430	
   431			if (rx_tmo == 0) {
   432				pr_warn("%s: #%u: rx test timed out\n",
   433					thread_name, total_tests - 1);
   434				failed_tests++;
   435				continue;
   436			} else if (status != DMA_COMPLETE) {
   437				pr_warn("%s: #%u: rx got completion callback, ",
   438					thread_name, total_tests - 1);
   439				pr_warn("but status is \'%s\'\n",
   440					status == DMA_ERROR ? "error" :
   441					"in progress");
   442				failed_tests++;
   443				continue;
   444			}
   445	
   446			/* Unmap by myself */
   447			for (i = 0; i < dst_cnt; i++)
   448				dma_unmap_single(rx_dev->dev, dma_dsts[i],
   449						 test_buf_size, DMA_BIDIRECTIONAL);
   450	
   451			error_count = 0;
   452			start = ktime_get();
   453			pr_debug("%s: verifying source buffer...\n", thread_name);
   454			error_count += dmatest_verify(thread->srcs, 0, src_off,
   455					0, PATTERN_SRC, true);
   456			error_count += dmatest_verify(thread->srcs, src_off,
   457					src_off + len, src_off,
   458					PATTERN_SRC | PATTERN_COPY, true);
   459			error_count += dmatest_verify(thread->srcs, src_off + len,
   460					test_buf_size, src_off + len,
   461					PATTERN_SRC, true);
   462	
   463			pr_debug("%s: verifying dest buffer...\n",
   464				 thread->task->comm);
   465			error_count += dmatest_verify(thread->dsts, 0, dst_off,
   466					0, PATTERN_DST, false);
   467			error_count += dmatest_verify(thread->dsts, dst_off,
   468					dst_off + len, src_off,
   469					PATTERN_SRC | PATTERN_COPY, false);
   470			error_count += dmatest_verify(thread->dsts, dst_off + len,
   471					test_buf_size, dst_off + len,
   472					PATTERN_DST, false);
   473			diff = ktime_sub(ktime_get(), start);
   474			comparetime = ktime_add(comparetime, diff);
   475	
   476			if (error_count) {
   477				pr_warn("%s: #%u: %u errors with ",
   478					thread_name, total_tests - 1, error_count);
   479				pr_warn("src_off=0x%x dst_off=0x%x len=0x%x\n",
   480					src_off, dst_off, len);
   481				failed_tests++;
   482			} else {
   483				pr_debug("%s: #%u: No errors with ",
   484					 thread_name, total_tests - 1);
   485				pr_debug("src_off=0x%x dst_off=0x%x len=0x%x\n",
   486					 src_off, dst_off, len);
   487			}
   488		}
   489	
   490		ktime = ktime_sub(ktime_get(), ktime);
   491		ktime = ktime_sub(ktime, comparetime);
   492		ktime = ktime_sub(ktime, filltime);
   493		runtime = ktime_to_us(ktime);
   494	
   495		ret = 0;
   496		for (i = 0; thread->dsts[i]; i++)
   497			kfree(thread->dsts[i]);
   498	err_dstbuf:
   499		kfree(thread->dsts);
   500	err_dsts:
   501		for (i = 0; thread->srcs[i]; i++)
   502			kfree(thread->srcs[i]);
   503	err_srcbuf:
   504		kfree(thread->srcs);
   505	err_srcs:
   506		pr_notice("%s: terminating after %u tests, %u failures %llu iops %llu KB/s (status %d)\n",
   507			  thread_name, total_tests, failed_tests,
   508			  dmatest_persec(runtime, total_tests),
   509			  dmatest_KBs(runtime, total_len), ret);
   510	
   511		thread->done = true;
   512		wake_up(&thread_wait);
   513	
   514		return ret;
   515	}
   516	
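The -Wvla warnings at lines 294, 295, 302 and 303 come from sizing the
on-stack arrays with src_cnt/dst_cnt/bd_cnt, which are ordinary ints even
though they are always 11 in this function. One possible way to silence
them, assuming the descriptor count can stay a compile-time constant (the
macro name below is illustrative, not taken from the driver):

	/* Illustrative sketch: with a fixed descriptor count the arrays
	 * become plain fixed-size arrays instead of VLAs. The #define
	 * would live at file scope.
	 */
	#define AXIDMATEST_BD_CNT	11

	dma_addr_t dma_srcs[AXIDMATEST_BD_CNT];
	dma_addr_t dma_dsts[AXIDMATEST_BD_CNT];
	struct scatterlist tx_sg[AXIDMATEST_BD_CNT];
	struct scatterlist rx_sg[AXIDMATEST_BD_CNT];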

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org


