[RFC 3/7] dma: Add function for drivers to know if allowing blocking is useful

mhkelley58 at gmail.com
Thu Aug 22 11:37:14 PDT 2024


From: Michael Kelley <mhklinux at outlook.com>

With the addition of swiotlb throttling functionality, storage
device drivers may want to know whether using the DMA_ATTR_MAY_BLOCK
attribute is useful. In a CoCo VM or environment where swiotlb=force
is used, the MAY_BLOCK attribute enables swiotlb throttling. But if
throttling is not enable or useful, storage device drivers probably
do not want to set BLK_MQ_F_BLOCKING at the blk-mq request queue level.

Add function dma_recommend_may_block() that indicates whether
the underlying implementation of the DMA map calls would benefit
from allowing blocking. If the kernel is built with
CONFIG_SWIOTLB_THROTTLE and swiotlb bounce buffering is forced for
the device (via swiotlb=force on the kernel command line, or because
the guest is a CoCo VM), this function returns true. Otherwise it
returns false.

Signed-off-by: Michael Kelley <mhklinux at outlook.com>
---
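Not part of this patch: a rough sketch of the intended usage. A
blk-mq based storage driver could consult dma_recommend_may_block()
when building its tag set and set BLK_MQ_F_BLOCKING only when
swiotlb throttling can actually take advantage of it. The example_*
names, command size, and queue parameters below are made up for
illustration; only dma_recommend_may_block() and the standard
blk-mq APIs are real.

	/*
	 * Hypothetical driver code, not from this series: decide at
	 * tag-set setup time whether the request queue should allow
	 * blocking in ->queue_rq(), based on whether the DMA layer
	 * would benefit from DMA_ATTR_MAY_BLOCK.
	 */
	static int example_setup_tag_set(struct example_dev *edev)
	{
		struct blk_mq_tag_set *set = &edev->tag_set;

		memset(set, 0, sizeof(*set));
		set->ops = &example_mq_ops;
		set->nr_hw_queues = 1;
		set->queue_depth = 64;
		set->numa_node = NUMA_NO_NODE;
		set->cmd_size = sizeof(struct example_cmd);

		/* Allow ->queue_rq() to block only when it helps */
		if (dma_recommend_may_block(edev->dma_dev))
			set->flags |= BLK_MQ_F_BLOCKING;

		return blk_mq_alloc_tag_set(set);
	}
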
 include/linux/dma-mapping.h |  5 +++++
 kernel/dma/direct.c         |  6 ++++++
 kernel/dma/direct.h         |  1 +
 kernel/dma/mapping.c        | 10 ++++++++++
 4 files changed, 22 insertions(+)

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 7b78294813be..ec2edf068218 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -145,6 +145,7 @@ int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 u64 dma_get_required_mask(struct device *dev);
 bool dma_addressing_limited(struct device *dev);
+bool dma_recommend_may_block(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
 size_t dma_opt_mapping_size(struct device *dev);
 unsigned long dma_get_merge_boundary(struct device *dev);
@@ -252,6 +253,10 @@ static inline bool dma_addressing_limited(struct device *dev)
 {
 	return false;
 }
+static inline bool dma_recommend_may_block(struct device *dev)
+{
+	return false;
+}
 static inline size_t dma_max_mapping_size(struct device *dev)
 {
 	return 0;
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 80e03c0838d4..34d14e4ace64 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -649,6 +649,12 @@ bool dma_direct_all_ram_mapped(struct device *dev)
 				      check_ram_in_range_map);
 }
 
+bool dma_direct_recommend_may_block(struct device *dev)
+{
+	return IS_ENABLED(CONFIG_SWIOTLB_THROTTLE) &&
+			is_swiotlb_force_bounce(dev);
+}
+
 size_t dma_direct_max_mapping_size(struct device *dev)
 {
 	/* If SWIOTLB is active, use its maximum mapping size */
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index d2c0b7e632fc..63516a540276 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -21,6 +21,7 @@ bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 		enum dma_data_direction dir, unsigned long attrs);
 bool dma_direct_all_ram_mapped(struct device *dev);
+bool dma_direct_recommend_may_block(struct device *dev);
 size_t dma_direct_max_mapping_size(struct device *dev);
 
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index b1c18058d55f..832982bafd5a 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -858,6 +858,16 @@ bool dma_addressing_limited(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dma_addressing_limited);
 
+bool dma_recommend_may_block(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (dma_map_direct(dev, ops))
+		return dma_direct_recommend_may_block(dev);
+	return false;
+}
+EXPORT_SYMBOL_GPL(dma_recommend_may_block);
+
 size_t dma_max_mapping_size(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
-- 
2.25.1



