[PATCH 3/4] dma: debug: poison DMA buffers with KASAN while owned by device
Ahmad Fatoum
a.fatoum at pengutronix.de
Tue Sep 10 04:48:31 PDT 2024
Accessing a DMA buffer while it is owned by the device is racy: the
device may not have written any data yet, and even if it has, the data
may still be hidden by stale cache lines that the CPU speculatively
fetched in the meantime. Only after syncing the buffer for the CPU are
the caches, if any, invalidated, so the CPU sees what the device wrote.
It's therefore sensible to poison a buffer while it is owned by the
device and to unpoison it only once ownership has been transferred back
to the CPU.
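As an example, the poisoning catches premature accesses like the one
below (a sketch only; a Linux-style barebox DMA API is assumed,
matching the debug hooks wired up in this patch):

  #include <dma.h>
  #include <linux/types.h>

  /* hypothetical RX path, for illustration */
  static u8 rx_example(struct device *dev, void *buf, size_t len)
  {
          dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
          u8 first;

          /* ... device fills the buffer via DMA ... */

          /* BAD: buffer is still owned by the device; with this patch,
           * KASAN reports this read as "dma-mapped-to-device"
           */
          first = *(u8 *)buf;

          dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

          /* GOOD: ownership is back with the CPU, buffer is unpoisoned */
          first = *(u8 *)buf;

          dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);

          return first;
  }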
Signed-off-by: Ahmad Fatoum <a.fatoum at pengutronix.de>
---
drivers/dma/debug.c | 60 ++++++++++++++++++++++++++++++++++++--
include/linux/kasan.h | 2 +-
lib/kasan/generic_report.c | 4 +--
3 files changed, 61 insertions(+), 5 deletions(-)
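(Illustration, not part of the patch:) the DMA_KASAN_ALIGN() helper
added below rounds sizes up to generic KASAN's 8-byte granule
(KASAN_SHADOW_SCALE_SHIFT == 3). Standalone, with a simplified ALIGN():

  #define KASAN_SHADOW_SCALE_SHIFT 3   /* generic KASAN: 8-byte granules */
  #define ALIGN(x, a)        (((x) + (a) - 1) & ~((a) - 1)) /* simplified */
  #define DMA_KASAN_ALIGN(x) ALIGN(x, 1 << KASAN_SHADOW_SCALE_SHIFT)

  /*
   * DMA_KASAN_ALIGN(1)  ==  8
   * DMA_KASAN_ALIGN(8)  ==  8
   * DMA_KASAN_ALIGN(13) == 16
   */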
diff --git a/drivers/dma/debug.c b/drivers/dma/debug.c
index 7bf0be1e1ff3..fd279be5be02 100644
--- a/drivers/dma/debug.c
+++ b/drivers/dma/debug.c
@@ -3,14 +3,36 @@
#include <dma.h>
#include <linux/list.h>
#include <linux/printk.h>
+#include <linux/align.h>
+#include <linux/kasan.h>
#include "debug.h"
+/**
+ * DMA_KASAN_ALIGN() - align a buffer size to the KASAN granule size
+ * @x: size to be aligned
+ *
+ * kasan_unpoison_shadow() poisons the bytes trailing a buffer whenever
+ * the size is not a multiple of the granule size. We thus always align
+ * sizes here, so that unpoisoning a smaller buffer that overlaps a
+ * bigger, previously unpoisoned buffer doesn't lead KASAN to report an
+ * out-of-bounds memory access.
+ *
+ * A more accurate reflection of the hardware would be to align to
+ * DMA_ALIGNMENT. We intentionally don't do that here, as choosing the
+ * lowest possible alignment allows us to catch issues that may not pop
+ * up when testing on platforms with a higher DMA_ALIGNMENT.
+ *
+ * Return: the value after alignment
+ */
+#define DMA_KASAN_ALIGN(x) ALIGN(x, 1 << KASAN_SHADOW_SCALE_SHIFT)
+
static LIST_HEAD(dma_mappings);
struct dma_debug_entry {
struct list_head list;
struct device *dev;
dma_addr_t dev_addr;
+ const void *cpu_addr;
size_t size;
int direction;
};
@@ -103,6 +125,17 @@ dma_debug_entry_find(struct device *dev, dma_addr_t dev_addr, size_t size)
return NULL;
}
+static const void *dma_debug_entry_cpu_addr(struct dma_debug_entry *entry,
+ dma_addr_t dma_handle)
+{
+ ptrdiff_t offset = dma_handle - entry->dev_addr;
+
+ if (offset < 0)
+ return NULL;
+
+ return entry->cpu_addr + offset;
+}
+
void debug_dma_map(struct device *dev, void *addr,
size_t size,
int direction, dma_addr_t dev_addr)
@@ -120,6 +153,7 @@ void debug_dma_map(struct device *dev, void *addr,
entry->dev = dev;
entry->dev_addr = dev_addr;
+ entry->cpu_addr = addr;
entry->size = size;
entry->direction = direction;
@@ -130,12 +164,15 @@ void debug_dma_map(struct device *dev, void *addr,
if (!IS_ALIGNED(dev_addr, DMA_ALIGNMENT))
dma_dev_warn(dev, "Mapping insufficiently aligned %s buffer 0x%llx+0x%zx: %u bytes required!\n",
dir2name[direction], (u64)dev_addr, size, DMA_ALIGNMENT);
+
+ kasan_poison_shadow(addr, DMA_KASAN_ALIGN(size), KASAN_DMA_DEV_MAPPED);
}
void debug_dma_unmap(struct device *dev, dma_addr_t addr,
size_t size, int direction)
{
struct dma_debug_entry *entry;
+ const void *cpu_addr;
entry = dma_debug_entry_find(dev, addr, size);
if (!entry) {
@@ -153,6 +190,9 @@ void debug_dma_unmap(struct device *dev, dma_addr_t addr,
dir2name[direction]);
dma_debug(entry, "deallocating\n");
+ cpu_addr = dma_debug_entry_cpu_addr(entry, addr);
+ if (cpu_addr)
+ kasan_unpoison_shadow(cpu_addr, DMA_KASAN_ALIGN(size));
list_del(&entry->list);
free(entry);
}
@@ -162,11 +202,19 @@ void debug_dma_sync_single_for_cpu(struct device *dev,
int direction)
{
struct dma_debug_entry *entry;
+ const void *cpu_addr;
entry = dma_debug_entry_find(dev, dma_handle, size);
- if (!entry)
+ if (!entry) {
dma_dev_warn(dev, "sync for CPU of never-mapped %s buffer 0x%llx+0x%zx!\n",
dir2name[direction], (u64)dma_handle, size);
+ return;
+ }
+
+ cpu_addr = dma_debug_entry_cpu_addr(entry, dma_handle);
+ if (cpu_addr) {
+ kasan_unpoison_shadow(cpu_addr, DMA_KASAN_ALIGN(size));
+ }
}
void debug_dma_sync_single_for_device(struct device *dev,
@@ -174,6 +222,7 @@ void debug_dma_sync_single_for_device(struct device *dev,
size_t size, int direction)
{
struct dma_debug_entry *entry;
+ const void *cpu_addr;
/*
* If dma_map_single was omitted, CPU cache may contain dirty cache lines
@@ -182,7 +231,14 @@ void debug_dma_sync_single_for_device(struct device *dev,
* corruption
*/
entry = dma_debug_entry_find(dev, dma_handle, size);
- if (!entry)
+ if (!entry) {
dma_dev_warn(dev, "Syncing for device of never-mapped %s buffer 0x%llx+0x%zx!\n",
dir2name[direction], (u64)dma_handle, size);
+ return;
+ }
+
+ cpu_addr = dma_debug_entry_cpu_addr(entry, dma_handle);
+ if (cpu_addr) {
+ kasan_poison_shadow(cpu_addr, DMA_KASAN_ALIGN(size), KASAN_DMA_DEV_MAPPED);
+ }
}
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 7812e0fa805b..2dea347c40ae 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -25,7 +25,7 @@
#define KASAN_KMALLOC_FREETRACK 0xFA /* object was freed and has free track set */
#define KASAN_GLOBAL_REDZONE 0xF9 /* redzone for global variable */
-#define KASAN_VMALLOC_INVALID 0xF8 /* unallocated space in vmapped page */
+#define KASAN_DMA_DEV_MAPPED 0xF8 /* DMA-mapped buffer currently owned by device */
/*
* Stack redzone shadow values
diff --git a/lib/kasan/generic_report.c b/lib/kasan/generic_report.c
index 1cc5829e8d7f..83d55b12c070 100644
--- a/lib/kasan/generic_report.c
+++ b/lib/kasan/generic_report.c
@@ -74,8 +74,8 @@ static const char *get_shadow_bug_type(struct kasan_access_info *info)
case KASAN_ALLOCA_RIGHT:
bug_type = "alloca-out-of-bounds";
break;
- case KASAN_VMALLOC_INVALID:
- bug_type = "vmalloc-out-of-bounds";
+ case KASAN_DMA_DEV_MAPPED:
+ bug_type = "dma-mapped-to-device";
break;
}
--
2.39.2