the existing makedumpfile is almost there... :)
Ken'ichi Ohmichi
oomichi at mxs.nes.nec.co.jp
Thu Sep 25 07:31:30 EDT 2008
Hi Jay,
The last patch still contained a bug, so please use this patch instead.
Sorry for my mistake.
Ken'ichi Ohmichi wrote:
> Yes, your test data helps me.
> Your test showed me that there was still a buffer-handling problem
> when referring to a different page, so the last patch was not enough.
> I created the attached patch to fix it. Could you test the
> attached patch again? Sorry for the many tests.
>
> This patch is for makedumpfile-1.2.9.
>
>
> Thanks
> Ken'ichi Ohmichi
>
> diff -puN a/makedumpfile.c b/makedumpfile.c
> --- a/makedumpfile.c 2008-09-25 15:39:00.000000000 +0900
> +++ b/makedumpfile.c 2008-09-25 15:39:17.000000000 +0900
> @@ -4133,6 +4133,7 @@ exclude_unnecessary_pages(void)
> unsigned int mm;
> unsigned long mem_map;
> unsigned long long pfn, paddr, pfn_mm;
> + unsigned long long pfn_read_start, pfn_read_end, index_pg;
> unsigned char *page_cache = NULL, *pcache;
> unsigned int _count;
> unsigned long flags, mapping;
> @@ -4156,6 +4157,12 @@ exclude_unnecessary_pages(void)
> if (mem_map == NOT_MEMMAP_ADDR)
> continue;
>
> + /*
> + * Refresh the buffer of struct page, when changing mem_map.
> + */
> + pfn_read_start = ULONGLONG_MAX;
> + pfn_read_end = 0;
> +
> for (; pfn < mmd->pfn_end;
> pfn++, mem_map += SIZE(page),
> paddr += info->page_size) {
> @@ -4166,16 +4173,23 @@ exclude_unnecessary_pages(void)
> if (!is_in_segs(paddr))
> continue;
>
> - if ((pfn % PGMM_CACHED) == 0) {
> - if (pfn + PGMM_CACHED < mmd->pfn_end)
> - pfn_mm = PGMM_CACHED;
> + index_pg = pfn % PGMM_CACHED;
> + if (pfn < pfn_read_start || pfn_read_end < pfn) {
> + if (roundup(pfn, PGMM_CACHED) < mmd->pfn_end)
~~~~~~~~~~~~~~~~~~~~~~~~~ This is a bug.
The above should be roundup(pfn + 1, PGMM_CACHED).
Thanks
Ken'ichi Ohmichi
---
diff -puN a/makedumpfile.c b/makedumpfile.c
--- a/makedumpfile.c 2008-09-04 16:31:58.000000000 +0900
+++ b/makedumpfile.c 2008-09-25 20:27:48.000000000 +0900
@@ -4133,6 +4133,7 @@ exclude_unnecessary_pages(void)
unsigned int mm;
unsigned long mem_map;
unsigned long long pfn, paddr, pfn_mm;
+ unsigned long long pfn_read_start, pfn_read_end, index_pg;
unsigned char *page_cache = NULL, *pcache;
unsigned int _count;
unsigned long flags, mapping;
@@ -4156,6 +4157,12 @@ exclude_unnecessary_pages(void)
if (mem_map == NOT_MEMMAP_ADDR)
continue;
+ /*
+ * Refresh the buffer of struct page, when changing mem_map.
+ */
+ pfn_read_start = ULONGLONG_MAX;
+ pfn_read_end = 0;
+
for (; pfn < mmd->pfn_end;
pfn++, mem_map += SIZE(page),
paddr += info->page_size) {
@@ -4166,16 +4173,24 @@ exclude_unnecessary_pages(void)
if (!is_in_segs(paddr))
continue;
- if ((pfn % PGMM_CACHED) == 0) {
- if (pfn + PGMM_CACHED < mmd->pfn_end)
- pfn_mm = PGMM_CACHED;
+ index_pg = pfn % PGMM_CACHED;
+ if (pfn < pfn_read_start || pfn_read_end < pfn) {
+ if (roundup(pfn + 1, PGMM_CACHED) < mmd->pfn_end)
+ pfn_mm = PGMM_CACHED - index_pg;
else
pfn_mm = mmd->pfn_end - pfn;
- if (!readmem(VADDR, mem_map, page_cache,
- SIZE(page) * pfn_mm))
+
+ if (!readmem(VADDR, mem_map,
+ page_cache + (index_pg * SIZE(page)),
+ SIZE(page) * pfn_mm)) {
+ ERRMSG("Can't read the buffer of struct page.\n");
goto out;
+ }
+ pfn_read_start = pfn;
+ pfn_read_end = pfn + pfn_mm - 1;
}
- pcache = page_cache + ((pfn%PGMM_CACHED) * SIZE(page));
+ pcache = page_cache + (index_pg * SIZE(page));
+
flags = ULONG(pcache + OFFSET(page.flags));
_count = UINT(pcache + OFFSET(page._count));
mapping = ULONG(pcache + OFFSET(page.mapping));
More information about the kexec
mailing list