[PATCH v2 7/8] cache: store mapped regions directly in the cache
Petr Tesarik
ptesarik at suse.cz
Fri Mar 6 05:23:34 PST 2015
Avoid copying data between the mmapped region and the cache. To do that,
readmem() tries to map the page before reading it. The mmap path and
the read path are separated: mappage_elf() uses the mmap syscall, and
readpage_elf() uses the read syscall.
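In readmem(), the two paths combine roughly like this (a simplified sketch of
the makedumpfile.c hunk below; the non-ELF dump paths are left out):

	char *mapbuf = mappage_elf(pgaddr);	/* NULL if the page cannot be mapped */

	if (mapbuf)
		pgbuf = mapbuf;			/* serve the page straight from the mapping */
	else if (!readpage_elf(pgaddr, pgbuf))	/* fall back to the read() path */
		goto error_cached;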
If mmap() succeeds, readmem() stores the mmapped address in the cache entry.
Of course, the mapping must not be removed until the cache entry is evicted,
but the cache code has no knowledge of mmap. To handle this in a flexible
way, a discard callback is added to struct cache_entry and invoked by the
cache eviction code.
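Put together, the hand-over between readmem() and the cache looks roughly
like this (condensed from the hunks below):

	/* readmem(): let the cache entry own the mapping */
	cached->bufptr  = info->mmap_buf;
	cached->buflen  = info->mmap_end_offset - info->mmap_start_offset;
	cached->discard = unmap_cache;		/* calls munmap() on eviction */

	/* cache_alloc(): invoke the callback before recycling the entry */
	if (entry->discard)
		entry->discard(entry);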
Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
---
cache.c | 3 ++
cache.h | 2 ++
makedumpfile.c | 105 ++++++++++++++++++++++++++++++++-------------------------
3 files changed, 64 insertions(+), 46 deletions(-)
diff --git a/cache.c b/cache.c
index 938eda6..963eb76 100644
--- a/cache.c
+++ b/cache.c
@@ -108,6 +108,8 @@ cache_alloc(unsigned long long paddr)
} else if (used.tail) {
entry = used.tail;
remove_entry(&used, entry);
+ if (entry->discard)
+ entry->discard(entry);
} else
return NULL;
@@ -115,6 +117,7 @@ cache_alloc(unsigned long long paddr)
entry->paddr = paddr;
entry->bufptr = cachebuf + idx * info->page_size;
entry->buflen = info->page_size;
+ entry->discard = NULL;
add_entry(&pending, entry);
return entry;
diff --git a/cache.h b/cache.h
index 792ba6c..c55cec4 100644
--- a/cache.h
+++ b/cache.h
@@ -24,6 +24,8 @@ struct cache_entry {
void *bufptr;
unsigned long buflen;
struct cache_entry *next, *prev;
+
+ void (*discard)(struct cache_entry *);
};
int cache_init(void);
diff --git a/makedumpfile.c b/makedumpfile.c
index f1aad08..827c36f 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -294,6 +294,12 @@ read_page_desc(unsigned long long paddr, page_desc_t *pd)
return TRUE;
}
+static void
+unmap_cache(struct cache_entry *entry)
+{
+ munmap(entry->bufptr, entry->buflen);
+}
+
static int
update_mmap_range(off_t offset, int initial) {
off_t start_offset, end_offset;
@@ -301,9 +307,6 @@ update_mmap_range(off_t offset, int initial) {
off_t max_offset = get_max_file_offset();
off_t pt_load_end = offset_to_pt_load_end(offset);
- munmap(info->mmap_buf,
- info->mmap_end_offset - info->mmap_start_offset);
-
/*
* offset for mmap() must be page aligned.
*/
@@ -357,29 +360,45 @@ initialize_mmap(void) {
return TRUE;
}
-static int
-read_with_mmap(off_t offset, void *bufptr, unsigned long size) {
- size_t read_size;
+static char *
+mappage_elf(unsigned long long paddr)
+{
+ off_t offset, offset2;
-next_region:
+ if (info->flag_usemmap != MMAP_ENABLE)
+ return NULL;
- if (!is_mapped_with_mmap(offset))
- if (!update_mmap_range(offset, 0))
- return FALSE;
+ offset = paddr_to_offset(paddr);
+ if (!offset)
+ return NULL;
+
+ offset2 = paddr_to_offset(paddr + info->page_size);
+ if (!offset2)
+ return NULL;
- read_size = MIN(info->mmap_end_offset - offset, size);
+ if (offset2 - offset != info->page_size)
+ return NULL;
- memcpy(bufptr, info->mmap_buf +
- (offset - info->mmap_start_offset), read_size);
+ if (!is_mapped_with_mmap(offset) &&
+ !update_mmap_range(offset, 0)) {
+ ERRMSG("Can't read the dump memory(%s) with mmap().\n",
+ info->name_memory);
- offset += read_size;
- bufptr += read_size;
- size -= read_size;
+ ERRMSG("This kernel might have some problems about mmap().\n");
+ ERRMSG("read() will be used instead of mmap() from now.\n");
- if (size > 0)
- goto next_region;
+ /*
+ * Fall back to read().
+ */
+ info->flag_usemmap = MMAP_DISABLE;
+ return NULL;
+ }
- return TRUE;
+ if (offset < info->mmap_start_offset ||
+ offset + info->page_size > info->mmap_end_offset)
+ return NULL;
+
+ return info->mmap_buf + (offset - info->mmap_start_offset);
}
static int
@@ -387,33 +406,16 @@ read_from_vmcore(off_t offset, void *bufptr, unsigned long size)
{
const off_t failed = (off_t)-1;
- if (info->flag_usemmap == MMAP_ENABLE &&
- page_is_fractional(offset) == FALSE) {
- if (!read_with_mmap(offset, bufptr, size)) {
- ERRMSG("Can't read the dump memory(%s) with mmap().\n",
- info->name_memory);
-
- ERRMSG("This kernel might have some problems about mmap().\n");
- ERRMSG("read() will be used instead of mmap() from now.\n");
-
- /*
- * Fall back to read().
- */
- info->flag_usemmap = MMAP_DISABLE;
- read_from_vmcore(offset, bufptr, size);
- }
- } else {
- if (lseek(info->fd_memory, offset, SEEK_SET) == failed) {
- ERRMSG("Can't seek the dump memory(%s). (offset: %llx) %s\n",
- info->name_memory, (unsigned long long)offset, strerror(errno));
- return FALSE;
- }
+ if (lseek(info->fd_memory, offset, SEEK_SET) == failed) {
+ ERRMSG("Can't seek the dump memory(%s). (offset: %llx) %s\n",
+ info->name_memory, (unsigned long long)offset, strerror(errno));
+ return FALSE;
+ }
- if (read(info->fd_memory, bufptr, size) != size) {
- ERRMSG("Can't read the dump memory(%s). %s\n",
- info->name_memory, strerror(errno));
- return FALSE;
- }
+ if (read(info->fd_memory, bufptr, size) != size) {
+ ERRMSG("Can't read the dump memory(%s). %s\n",
+ info->name_memory, strerror(errno));
+ return FALSE;
}
return TRUE;
@@ -662,7 +664,18 @@ next_page:
if (!readpage_sadump(pgaddr, pgbuf))
goto error_cached;
} else {
- if (!readpage_elf(pgaddr, pgbuf))
+ char *mapbuf = mappage_elf(pgaddr);
+ size_t mapoff;
+
+ if (mapbuf) {
+ pgbuf = mapbuf;
+ mapoff = mapbuf - info->mmap_buf;
+ cached->paddr = pgaddr - mapoff;
+ cached->bufptr = info->mmap_buf;
+ cached->buflen = info->mmap_end_offset -
+ info->mmap_start_offset;
+ cached->discard = unmap_cache;
+ } else if (!readpage_elf(pgaddr, pgbuf))
goto error_cached;
}
cache_add(cached);
--
1.8.4.5