[PATCH v2 18/37] kasan: separate metadata_fetch_row for each mode
Andrey Konovalov <andreyknvl at google.com>
Tue Sep 15 17:16:00 EDT 2020
This is a preparatory commit for the upcoming addition of a new hardware
tag-based (MTE-based) KASAN mode.
Rework print_memory_metadata() to make it agnostic to the way metadata
is stored. Allow providing a separate metadata_fetch_row() implementation
for each KASAN mode. Hardware tag-based KASAN will provide its own
implementation that doesn't use shadow memory.
No functional changes for software modes.
Signed-off-by: Andrey Konovalov <andreyknvl at google.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino at arm.com>
---
Change-Id: I5b0ed1d079ea776e620beca6a529a861e7dced95
---
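For context only (not part of this patch): the hardware tag-based mode added
later in this series is expected to implement the new hook along these lines,
reading one memory tag per granule instead of copying shadow bytes. The
hw_get_mem_tag() helper is assumed here and is not introduced by this patch:

void metadata_fetch_row(char *buffer, void *row)
{
	int i;

	/* One metadata byte per granule, fetched from the memory tag. */
	for (i = 0; i < META_BYTES_PER_ROW; i++)
		buffer[i] = hw_get_mem_tag(row + i * KASAN_GRANULE_SIZE);
}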
mm/kasan/kasan.h | 8 ++++++
mm/kasan/report.c | 56 +++++++++++++++++++--------------------
mm/kasan/report_generic.c | 5 ++++
mm/kasan/report_tags.c | 5 ++++
4 files changed, 45 insertions(+), 29 deletions(-)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 38fa4c202e9a..1d3c7c6ce771 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -56,6 +56,13 @@
#define KASAN_ABI_VERSION 1
#endif
+/* Metadata layout customization. */
+#define META_BYTES_PER_BLOCK 1
+#define META_BLOCKS_PER_ROW 16
+#define META_BYTES_PER_ROW (META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK)
+#define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
+#define META_ROWS_AROUND_ADDR 2
+
struct kasan_access_info {
const void *access_addr;
const void *first_bad_addr;
@@ -167,6 +174,7 @@ bool check_invalid_free(void *addr);
void *find_first_bad_addr(void *addr, size_t size);
const char *get_bug_type(struct kasan_access_info *info);
+void metadata_fetch_row(char *buffer, void *row);
#ifdef CONFIG_KASAN_STACK_ENABLE
void print_address_stack_frame(const void *addr);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 6306673e7062..c904edab33b8 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -36,12 +36,6 @@
#include "kasan.h"
#include "../slab.h"
-/* Metadata layout customization. */
-#define META_BYTES_PER_BLOCK 1
-#define META_BLOCKS_PER_ROW 16
-#define META_BYTES_PER_ROW (META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK)
-#define META_ROWS_AROUND_ADDR 2
-
static unsigned long kasan_flags;
#define KASAN_BIT_REPORTED 0
@@ -241,55 +235,59 @@ static void print_address_description(void *addr, u8 tag)
print_address_stack_frame(addr);
}
-static bool row_is_guilty(const void *row, const void *guilty)
+static bool meta_row_is_guilty(const void *row, const void *addr)
{
- return (row <= guilty) && (guilty < row + META_BYTES_PER_ROW);
+ return (row <= addr) && (addr < row + META_MEM_BYTES_PER_ROW);
}
-static int shadow_pointer_offset(const void *row, const void *shadow)
+static int meta_pointer_offset(const void *row, const void *addr)
{
- /* The length of ">ff00ff00ff00ff00: " is
- * 3 + (BITS_PER_LONG/8)*2 chars.
+ /*
+ * Memory state around the buggy address:
+ * ff00ff00ff00ff00: 00 00 00 05 fe fe fe fe fe fe fe fe fe fe fe fe
+ * ...
+ *
+ * The length of ">ff00ff00ff00ff00: " is
+ * 3 + (BITS_PER_LONG / 8) * 2 chars.
+ * The length of each granule metadata is 2 bytes
+ * plus 1 byte for space.
*/
- return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
- (shadow - row) / META_BYTES_PER_BLOCK + 1;
+ return 3 + (BITS_PER_LONG / 8) * 2 +
+ (addr - row) / KASAN_GRANULE_SIZE * 3 + 1;
}
static void print_memory_metadata(const void *addr)
{
int i;
- const void *shadow = kasan_mem_to_shadow(addr);
- const void *shadow_row;
+ void *row;
- shadow_row = (void *)round_down((unsigned long)shadow,
- META_BYTES_PER_ROW)
- - META_ROWS_AROUND_ADDR * META_BYTES_PER_ROW;
+ row = (void *)round_down((unsigned long)addr, META_MEM_BYTES_PER_ROW)
+ - META_ROWS_AROUND_ADDR * META_MEM_BYTES_PER_ROW;
pr_err("Memory state around the buggy address:\n");
for (i = -META_ROWS_AROUND_ADDR; i <= META_ROWS_AROUND_ADDR; i++) {
- const void *kaddr = kasan_shadow_to_mem(shadow_row);
- char buffer[4 + (BITS_PER_LONG/8)*2];
- char shadow_buf[META_BYTES_PER_ROW];
+ char buffer[4 + (BITS_PER_LONG / 8) * 2];
+ char metadata[META_BYTES_PER_ROW];
snprintf(buffer, sizeof(buffer),
- (i == 0) ? ">%px: " : " %px: ", kaddr);
+ (i == 0) ? ">%px: " : " %px: ", row);
+
/*
* We should not pass a shadow pointer to generic
* function, because generic functions may try to
* access kasan mapping for the passed address.
*/
- memcpy(shadow_buf, shadow_row, META_BYTES_PER_ROW);
+ metadata_fetch_row(&metadata[0], row);
+
print_hex_dump(KERN_ERR, buffer,
DUMP_PREFIX_NONE, META_BYTES_PER_ROW, 1,
- shadow_buf, META_BYTES_PER_ROW, 0);
+ metadata, META_BYTES_PER_ROW, 0);
- if (row_is_guilty(shadow_row, shadow))
- pr_err("%*c\n",
- shadow_pointer_offset(shadow_row, shadow),
- '^');
+ if (meta_row_is_guilty(row, addr))
+ pr_err("%*c\n", meta_pointer_offset(row, addr), '^');
- shadow_row += META_BYTES_PER_ROW;
+ row += META_MEM_BYTES_PER_ROW;
}
}
diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c
index 29d30fae9421..6524651b5d2e 100644
--- a/mm/kasan/report_generic.c
+++ b/mm/kasan/report_generic.c
@@ -127,6 +127,11 @@ const char *get_bug_type(struct kasan_access_info *info)
return get_wild_bug_type(info);
}
+void metadata_fetch_row(char *buffer, void *row)
+{
+ memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
+}
+
#ifdef CONFIG_KASAN_STACK_ENABLE
static bool __must_check tokenize_frame_descr(const char **frame_descr,
char *token, size_t max_tok_len,
diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
index 6ddb55676a7c..4060d0503462 100644
--- a/mm/kasan/report_tags.c
+++ b/mm/kasan/report_tags.c
@@ -85,6 +85,11 @@ void *find_first_bad_addr(void *addr, size_t size)
return p;
}
+void metadata_fetch_row(char *buffer, void *row)
+{
+ memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
+}
+
void print_tags(u8 addr_tag, const void *addr)
{
u8 *shadow = (u8 *)kasan_mem_to_shadow(addr);
--
2.28.0.618.gf4bc123cb7-goog
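A note on the caret arithmetic above (illustration only, not part of the
patch): on a 64-bit kernel the ">%px: " prefix is 3 + (64 / 8) * 2 = 19
characters, and each granule's metadata prints as three characters ("XX ").
With N = (addr - row) / KASAN_GRANULE_SIZE, the returned offset is
19 + N * 3 + 1, which places the '^' under the first hex digit of the
guilty granule's metadata byte.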