[PATCH v3 8/8] Filter amdgpu mm pages

Tao Liu ltao at redhat.com
Mon Jan 19 18:55:00 PST 2026


This patch introduces an amdgpu mm page filtering extension. The mm pages
allocated to amdgpu will be discarded from the vmcore in order to shrink
the vmcore size, since pages allocated to amdgpu are useless for kernel
crash analysis and may contain sensitive data.

Signed-off-by: Tao Liu <ltao at redhat.com>
---
 extensions/Makefile        |  4 +-
 extensions/amdgpu_filter.c | 90 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 92 insertions(+), 2 deletions(-)
 create mode 100644 extensions/amdgpu_filter.c

diff --git a/extensions/Makefile b/extensions/Makefile
index afbc61e..81dda31 100644
--- a/extensions/Makefile
+++ b/extensions/Makefile
@@ -1,9 +1,9 @@
 CC ?= gcc
-CONTRIB_SO :=
+CONTRIB_SO := amdgpu_filter.so
 
 all: $(CONTRIB_SO)
 
-$(CONTRIB_SO): %.so: %.c
+$(CONTRIB_SO): %.so: %.c maple_tree.c
 	$(CC) -O2 -g -fPIC -shared -o $@ $^
 
 clean:
diff --git a/extensions/amdgpu_filter.c b/extensions/amdgpu_filter.c
new file mode 100644
index 0000000..433cc0b
--- /dev/null
+++ b/extensions/amdgpu_filter.c
@@ -0,0 +1,90 @@
+#include <stdio.h>
+#include "maple_tree.h"
+#include "../makedumpfile.h"
+#include "../btf_info.h"
+#include "../kallsyms.h"
+#include "../erase_info.h"
+
+#define MEMBER_OFF(S, M) \
+	(GET_STRUCT_MEMBER_MOFF(S, M) / 8) /* bit offset -> byte offset; parenthesized so the macro is safe inside larger expressions */
+#define MOD_MEMBER_OFF(MOD, S, M) \
+	(GET_MOD_STRUCT_MEMBER_MOFF(MOD, S, M) / 8) /* same, for members resolved from module (amdgpu) BTF */
+
+static void do_filter(void) /* walk every task's VMAs and mark amdgpu-owned mm pages for exclusion from the vmcore */
+{
+	uint64_t init_task, list, list_offset, amdgpu_gem_vm_ops; /* NOTE(review): list_offset is declared but never used */
+	uint64_t mm, vm_ops, tbo, ttm, num_pages, pages, pfn, vmemmap_base;
+	struct struct_member_info smi; /* NOTE(review): smi is never referenced directly; presumably required by the INIT_* macros -- confirm */
+	int array_len;
+	unsigned long *array_out; /* VMA addresses flattened out of the mm's maple tree; owned here, freed below */
+	init_task = get_kallsyms_value_by_name("init_task");
+	amdgpu_gem_vm_ops = get_kallsyms_value_by_name("amdgpu_gem_vm_ops"); /* a VMA whose vm_ops equals this is backed by an amdgpu GEM object */
+
+	INIT_STRUCT_MEMBER(task_struct, tasks); /* resolve member offsets/sizes before any MEMBER_OFF use */
+	INIT_STRUCT_MEMBER(task_struct, mm);
+	INIT_STRUCT_MEMBER(mm_struct, mm_mt);
+	INIT_STRUCT_MEMBER(vm_area_struct, vm_ops);
+	INIT_STRUCT_MEMBER(vm_area_struct, vm_private_data);
+	INIT_MOD_STRUCT_MEMBER(amdgpu, ttm_buffer_object, ttm); /* amdgpu/ttm types come from module BTF, not vmlinux */
+	INIT_MOD_STRUCT_MEMBER(amdgpu, ttm_tt, pages);
+	INIT_MOD_STRUCT_MEMBER(amdgpu, ttm_tt, num_pages);
+	INIT_STRUCT(page);
+
+	list = init_task + MEMBER_OFF(task_struct, tasks); /* list head inside init_task; the task list is circular */
+
+	do {
+		readmem(VADDR, list - MEMBER_OFF(task_struct, tasks) + 
+				MEMBER_OFF(task_struct, mm),
+			&mm, sizeof(uint64_t)); /* NOTE(review): readmem return values are unchecked throughout this function */
+		if (!mm) { /* kernel threads have no mm; nothing to filter */
+			list = next_list(list);
+			continue;
+		}
+
+		array_out = mt_dump(mm + MEMBER_OFF(mm_struct, mm_mt), &array_len); /* collect this mm's VMA pointers */
+		if (!array_out)
+			return; /* NOTE(review): one failed mt_dump aborts the whole task walk -- confirm this is intended */
+
+		for (int i = 0; i < array_len; i++) {
+			num_pages = 0;
+			readmem(VADDR, array_out[i] + MEMBER_OFF(vm_area_struct, vm_ops),
+				&vm_ops, GET_STRUCT_MEMBER_MSIZE(vm_area_struct, vm_ops));
+			if (vm_ops == amdgpu_gem_vm_ops) { /* this VMA maps an amdgpu GEM object */
+				readmem(VADDR, array_out[i] +
+					MEMBER_OFF(vm_area_struct, vm_private_data),
+					&tbo, GET_STRUCT_MEMBER_MSIZE(vm_area_struct, vm_private_data)); /* vm_private_data holds the ttm_buffer_object */
+				readmem(VADDR, tbo + MOD_MEMBER_OFF(amdgpu, ttm_buffer_object, ttm),
+					&ttm, GET_MOD_STRUCT_MEMBER_MSIZE(amdgpu, ttm_buffer_object, ttm));
+				if (ttm) { /* ttm stays NULL until the buffer object is populated */
+					readmem(VADDR, ttm + MOD_MEMBER_OFF(amdgpu, ttm_tt, num_pages),
+						&num_pages, GET_MOD_STRUCT_MEMBER_MSIZE(amdgpu, ttm_tt, num_pages));
+					readmem(VADDR, ttm + MOD_MEMBER_OFF(amdgpu, ttm_tt, pages),
+						&pages, GET_MOD_STRUCT_MEMBER_MSIZE(amdgpu, ttm_tt, pages));
+					readmem(VADDR, pages, &pages, sizeof(unsigned long)); /* dereference ttm->pages[0]: first struct page pointer */
+					readmem(VADDR, get_kallsyms_value_by_name("vmemmap_base"),
+						&vmemmap_base, sizeof(unsigned long)); /* NOTE(review): loop-invariant; could be read once before the walk */
+					pfn = (pages - vmemmap_base) / GET_STRUCT_SSIZE(page); /* assumes x86_64 vmemmap layout and a contiguous page range of num_pages -- confirm */
+					update_filter_pages_info(pfn, num_pages, true);
+				}
+			}
+		}
+
+		free(array_out);
+		list = next_list(list);
+	} while (list != init_task + MEMBER_OFF(task_struct, tasks)); /* stop once we wrap back around to init_task */
+
+	return;
+}
+
+__attribute__((constructor))
+static void amdgpu_mmpage_filter_constructor(void)
+{
+	/*
+	 * Run the filter only if the maple tree reader initialized;
+	 * the inverted goto-to-trailing-label was needless indirection.
+	 */
+	if (maple_init())
+		do_filter();
+}
+
+__attribute__((destructor))
+static void amdgpu_mmpage_filter_destructor(void) {} /* nothing to release; file now ends with a newline (checkpatch warns otherwise) */
-- 
2.47.0




More information about the kexec mailing list