[RFC PATCH v3 2/3] pmpool: Introduce persistent memory pool
Stanislav Kinsburskii
skinsburskii at linux.microsoft.com
Wed Oct 4 15:23:21 PDT 2023
This patch introduces a memory allocator specifically tailored for
persistent memory within the kernel. The allocator maintains
kernel-specific states like DMA passthrough device states, IOMMU state, and
more across kexec.
The current implementation provides a foundation for custom solutions that
may be developed in the future. Although the design is kept concise and
straightforward to encourage discussion and feedback, it remains fully
functional.
The persistent memory pool builds upon the contiguous memory allocator
(CMA) and ensures CMA state persistence across kexec by incorporating the
CMA bitmap into the memory region.
Potential applications include:
1. Enabling various in-kernel entities to allocate persistent pages from
a unified memory pool, obviating the need for reserving multiple
regions.
2. For in-kernel components that need the allocation address to be
retained on kernel kexec, this address can be exposed to user space
and subsequently passed through the command line.
3. Distinct subsystems or drivers can set aside their region, allocating
a segment for their persistent memory pool, suitable for uses such as
file systems, key-value stores, and other applications.
Signed-off-by: Stanislav Kinsburskii <skinsburskii at linux.microsoft.com>
---
include/linux/pmpool.h | 22 +++++++++
mm/Kconfig | 8 +++
mm/Makefile | 1
mm/pmpool.c | 115 ++++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 146 insertions(+)
create mode 100644 include/linux/pmpool.h
create mode 100644 mm/pmpool.c
diff --git a/include/linux/pmpool.h b/include/linux/pmpool.h
new file mode 100644
index 000000000000..b41f16fa9660
--- /dev/null
+++ b/include/linux/pmpool.h
@@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _PMPOOL_H
#define _PMPOOL_H

struct page;

#if defined(CONFIG_PMPOOL)
/*
 * pmpool_alloc - allocate @count contiguous pages from the default
 * persistent memory pool.
 *
 * Returns the first page of the allocation, or NULL when no pool was
 * configured or the allocation failed.
 */
struct page *pmpool_alloc(unsigned long count);
/*
 * pmpool_release - give @count pages previously obtained from
 * pmpool_alloc() back to the pool.
 *
 * Returns true on success, false when no pool was configured or the
 * pages do not belong to the pool.
 */
bool pmpool_release(struct page *pages, unsigned long count);
#else
/* CONFIG_PMPOOL=n: no-op stubs so callers need no ifdefs. */
static inline struct page *pmpool_alloc(unsigned long count)
{
	return NULL;
}
static inline bool pmpool_release(struct page *pages, unsigned long count)
{
	return false;
}
#endif

#endif /* _PMPOOL_H */
diff --git a/mm/Kconfig b/mm/Kconfig
index 09130434e30d..e7c10094fb10 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -922,6 +922,14 @@ config CMA_AREAS
If unsure, leave the default value "7" in UMA and "19" in NUMA.
+config PMPOOL
+ bool "Persistent memory pool support"
+ select CMA
+ help
+ This option adds support for CMA-based persistent memory pool
+ feature, which provides pages allocation and freeing from a set of
+ persistent memory ranges, deposited to the memory pool.
+
config MEM_SOFT_DIRTY
bool "Track memory changes"
depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
diff --git a/mm/Makefile b/mm/Makefile
index 678530a07326..8d3579e58c2c 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -139,3 +139,4 @@ obj-$(CONFIG_IO_MAPPING) += io-mapping.o
obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o
obj-$(CONFIG_GENERIC_IOREMAP) += ioremap.o
obj-$(CONFIG_SHRINKER_DEBUG) += shrinker_debug.o
+obj-$(CONFIG_PMPOOL) += pmpool.o
diff --git a/mm/pmpool.c b/mm/pmpool.c
new file mode 100644
index 000000000000..c74f09b99283
--- /dev/null
+++ b/mm/pmpool.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "pmpool: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/cma.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kexec.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/pmpool.h>
+
+#include "cma.h"
+
/*
 * A persistent memory pool: a CMA area carved out of a reserved
 * physical range, plus the iomem resource describing that range.
 */
struct pmpool {
	struct resource resource;
	struct cma *cma;
};

/* The single pool created by the "pmpool=" boot parameter, if any. */
static struct pmpool *default_pmpool;
+
+bool pmpool_release(struct page *pages, unsigned long count)
+{
+ if (!default_pmpool)
+ return false;
+
+ return cma_release(default_pmpool->cma, pages, count);
+}
+
+struct page *pmpool_alloc(unsigned long count)
+{
+ if (!default_pmpool)
+ return NULL;
+
+ return cma_alloc(default_pmpool->cma, count, 0, true);
+}
+
+static void pmpool_cma_accomodate_bitmap(struct cma *cma)
+{
+ unsigned long bitmap_size;
+
+ bitmap_free(cma->bitmap);
+ cma->bitmap = phys_to_virt(PFN_PHYS(cma->base_pfn));
+
+ bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma));
+ memset(cma->bitmap, 0, bitmap_size);
+ bitmap_set(cma->bitmap, 0, PAGE_ALIGN(bitmap_size) >> PAGE_SHIFT);
+
+ pr_info("CMA bitmap moved to %#llx\n", virt_to_phys(cma->bitmap));
+}
+
/*
 * Late fixups for the pool created by parse_pmpool_opt(): publish the
 * region in /proc/iomem and move the CMA bitmap into the pool itself so
 * allocation state can persist across kexec.  Runs as a postcore
 * initcall, i.e. after CMA areas have been activated.
 */
static int __init default_pmpool_fixup(void)
{
	if (!default_pmpool)
		return 0;

	/* Failure is only logged; the pool stays usable without it. */
	if (insert_resource(&iomem_resource, &default_pmpool->resource))
		pr_err("failed to insert resource\n");

	pmpool_cma_accomodate_bitmap(default_pmpool->cma);
	return 0;
}
postcore_initcall(default_pmpool_fixup);
+
+static int __init parse_pmpool_opt(char *str)
+{
+ static struct pmpool pmpool = {
+ .resource = {
+ .name = "Persistent Memory Pool",
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
+ .desc = IORES_DESC_CXL
+ }
+ };
+ phys_addr_t base, size, end;
+ int err;
+
+ /* Format is pmpool=<base>,<size> */
+ base = memparse(str, &str);
+ size = memparse(str + 1, NULL);
+ end = base + size - 1;
+
+ err = memblock_is_region_reserved(base, size);
+ if (err) {
+ pr_err("memory block overlaps with another one: %d\n", err);
+ return 0;
+ }
+
+ err = memblock_reserve(base, size);
+ if (err) {
+ pr_err("failed to reserve memory block: %d\n", err);
+ return 0;
+ }
+
+ err = cma_init_reserved_mem(base, size, 0, "pmpool", &pmpool.cma);
+ if (err) {
+ pr_err("failed to initialize CMA: %d\n", err);
+ goto free_memblock;
+ }
+
+ pmpool.resource.start = base;
+ pmpool.resource.end = end;
+
+ pr_info("default memory pool is created: %#llx-%#llx\n",
+ base, end);
+
+ default_pmpool = &pmpool;
+
+ return 0;
+
+free_memblock:
+ memblock_phys_free(base, size);
+ return 0;
+}
+early_param("pmpool", parse_pmpool_opt);
More information about the kexec
mailing list