[RFC PATCH v2 7/10] Add function to get number of PT_LOAD for cyclic processing.
Atsushi Kumagai
kumagai-atsushi at mxc.nes.nec.co.jp
Thu Jun 28 22:22:00 EDT 2012
get_loads_dumpfile_cyclic() calculates the final number of PT_LOAD segments produced by cyclic processing.
This number is needed to decide the file offsets of the PT_LOAD segments when writing the ELF dump format.
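To illustrate why the count is needed up front: once the number of PT_LOAD entries is known, the size of the program header table is fixed, so the file offset at which the first PT_LOAD's data can start is computable before any page data is written. The sketch below only illustrates that calculation; the single PT_NOTE header, the note_size parameter and the page-alignment step are assumptions made for the example, not makedumpfile's actual layout code.

#include <elf.h>
#include <stdio.h>
#include <sys/types.h>

/*
 * Illustration only: where PT_LOAD data could start in an ELF64 dump,
 * given the PT_LOAD count returned by get_loads_dumpfile_cyclic().
 * The "+ 1" assumes one PT_NOTE program header before the loads;
 * note_size and the page rounding are placeholders.
 */
static off_t
first_load_offset(int num_loads, size_t note_size, long page_size)
{
	off_t offset = sizeof(Elf64_Ehdr)
		       + (num_loads + 1) * sizeof(Elf64_Phdr)  /* phdr table */
		       + note_size;                            /* note data  */

	/* Round up to a page boundary (assumed for this example). */
	return ((offset + page_size - 1) / page_size) * page_size;
}

int
main(void)
{
	/* e.g. 3 PT_LOAD entries, 4 KiB of notes, 4 KiB pages -> 8192 */
	printf("%lld\n", (long long)first_load_offset(3, 4096, 4096));
	return 0;
}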
Signed-off-by: Atsushi Kumagai <kumagai-atsushi at mxc.nes.nec.co.jp>
---
makedumpfile.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
makedumpfile.h | 2 ++
2 files changed, 86 insertions(+)
diff --git a/makedumpfile.c b/makedumpfile.c
index 25d857a..420f103 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -4875,6 +4875,90 @@ read_pfn(unsigned long long pfn, unsigned char *buf)
 }
 
 int
+get_loads_dumpfile_cyclic(void)
+{
+	int i, phnum, num_new_load = 0;
+	long page_size = info->page_size;
+	unsigned char buf[info->page_size];
+	unsigned long long pfn, pfn_start, pfn_end, num_excluded;
+	unsigned long frac_head, frac_tail;
+	Elf64_Phdr load;
+
+	/*
+	 * Initialize target region and bitmap.
+	 */
+	info->cyclic_start_pfn = 0;
+	info->cyclic_end_pfn = PFN_CYCLIC;
+	if (!create_1st_bitmap_cyclic())
+		return FALSE;
+	if (!exclude_unnecessary_pages_cyclic())
+		return FALSE;
+
+	if (!(phnum = get_phnum_memory()))
+		return FALSE;
+
+	for (i = 0; i < phnum; i++) {
+		if (!get_phdr_memory(i, &load))
+			return FALSE;
+		if (load.p_type != PT_LOAD)
+			continue;
+
+		pfn_start = paddr_to_pfn(load.p_paddr);
+		pfn_end = paddr_to_pfn(load.p_paddr + load.p_memsz);
+		frac_head = page_size - (load.p_paddr % page_size);
+		frac_tail = (load.p_paddr + load.p_memsz) % page_size;
+
+		num_new_load++;
+		num_excluded = 0;
+
+		if (frac_head && (frac_head != page_size))
+			pfn_start++;
+		if (frac_tail)
+			pfn_end++;
+
+		for (pfn = pfn_start; pfn < pfn_end; pfn++) {
+			/*
+			 * Update target region and bitmap
+			 */
+			if (!is_cyclic_region(pfn)) {
+				if (!update_cyclic_region(pfn))
+					return FALSE;
+			}
+
+			if (!is_dumpable_cyclic(info->partial_bitmap2, pfn)) {
+				num_excluded++;
+				continue;
+			}
+
+			/*
+			 * Exclude zero pages.
+			 */
+			if (info->dump_level & DL_EXCLUDE_ZERO) {
+				if (!read_pfn(pfn, buf))
+					return FALSE;
+				if (is_zero_page(buf, page_size)) {
+					num_excluded++;
+					continue;
+				}
+			}
+
+			info->num_dumpable++;
+
+			/*
+			 * If the number of contiguous pages to be excluded
+			 * is PFN_EXCLUDED (256) or more, those pages are
+			 * really excluded and a new PT_LOAD segment is
+			 * created.
+			 */
+			if (num_excluded >= PFN_EXCLUDED) {
+				num_new_load++;
+			}
+			num_excluded = 0;
+		}
+	}
+	return num_new_load;
+}
+
+int
 write_kdump_pages(struct cache_data *cd_header, struct cache_data *cd_page)
 {
 	unsigned long long pfn, per, num_dumpable, num_dumped = 0;
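To make the PFN_EXCLUDED rule near the end of the hunk above concrete: within one original PT_LOAD, every run of PFN_EXCLUDED (256) or more consecutive excluded pages that is followed by a dumpable page adds one more PT_LOAD to the count. The standalone sketch below models only that counting step; the dumpable[] array is a made-up stand-in for the bitmap consulted through is_dumpable_cyclic(), and the cyclic-window update and zero-page check are left out.

#include <stdio.h>

#define PFN_EXCLUDED 256	/* threshold used by makedumpfile */

/*
 * Toy model of the counting rule in get_loads_dumpfile_cyclic():
 * a segment starts as one PT_LOAD; each sufficiently long excluded
 * run that ends at a dumpable page splits off one additional PT_LOAD.
 */
static int
count_loads(const unsigned char *dumpable, unsigned long npages)
{
	int num_load = 1;		/* the original segment itself */
	unsigned long pfn, num_excluded = 0;

	for (pfn = 0; pfn < npages; pfn++) {
		if (!dumpable[pfn]) {
			num_excluded++;
			continue;
		}
		/* A long enough excluded run ends here: new PT_LOAD. */
		if (num_excluded >= PFN_EXCLUDED)
			num_load++;
		num_excluded = 0;
	}
	return num_load;
}

int
main(void)
{
	unsigned char map[1000];
	unsigned long i;

	/* Pages 300..699 excluded (400 >= 256), the rest dumpable. */
	for (i = 0; i < 1000; i++)
		map[i] = (i < 300 || i >= 700);

	printf("PT_LOAD count: %d\n", count_loads(map, 1000));	/* prints 2 */
	return 0;
}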
diff --git a/makedumpfile.h b/makedumpfile.h
index e336814..77b824e 100644
--- a/makedumpfile.h
+++ b/makedumpfile.h
@@ -1516,5 +1516,7 @@ struct elf_prstatus {
  * Function Prototype.
  */
 unsigned long long get_num_dumpable_cyclic(void);
+int get_loads_dumpfile_cyclic(void);
+
 #endif /* MAKEDUMPFILE_H */
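One more detail from the makedumpfile.c hunk worth spelling out: the frac_head/frac_tail handling decides which page frames are scanned when a PT_LOAD does not start or end on a page boundary. As the code above is written, a partial page at the head is skipped (pfn_start++), while a partial page at the tail is still taken (pfn_end++). Below is a standalone rendering of just that arithmetic, with made-up addresses and a fixed 4 KiB page size; the paddr_to_pfn() macro is a simplification of makedumpfile's helper.

#include <stdio.h>

#define PAGE_SIZE 4096ULL

/* Simplified stand-in for makedumpfile's paddr_to_pfn(). */
#define paddr_to_pfn(X) ((X) / PAGE_SIZE)

int
main(void)
{
	/* Hypothetical PT_LOAD: starts and ends in the middle of a page. */
	unsigned long long p_paddr = 0x1000800ULL;	/* not page aligned */
	unsigned long long p_memsz = 0x3000ULL;
	unsigned long long end = p_paddr + p_memsz;	/* 0x1003800 */

	unsigned long long pfn_start = paddr_to_pfn(p_paddr);	/* 0x1000 */
	unsigned long long pfn_end = paddr_to_pfn(end);		/* 0x1003 */
	unsigned long long frac_head = PAGE_SIZE - (p_paddr % PAGE_SIZE);
	unsigned long long frac_tail = end % PAGE_SIZE;

	if (frac_head && frac_head != PAGE_SIZE)
		pfn_start++;	/* skip the partial page at the head */
	if (frac_tail)
		pfn_end++;	/* take the partial page at the tail */

	/* Scans pfns 0x1001 .. 0x1003 inclusive (loop uses pfn < pfn_end). */
	printf("scan [%#llx, %#llx)\n", pfn_start, pfn_end);
	return 0;
}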
--
1.7.9.2