[Crash-utility] [PATCH 2/3] Fix a KASLR problem of virsh dump

Takao Indoh indou.takao at jp.fujitsu.com
Tue Oct 10 09:25:38 UTC 2017


This patch fixes a problem where crash cannot open a dumpfile captured
by "virsh dump --memory-only" from a KASLR-enabled kernel.

When the KASLR feature is enabled, the kernel is placed in memory at a
random address, and therefore crash cannot open a dumpfile captured by
"virsh dump --memory-only": the addresses of kernel symbols in
System.map or vmlinux differ from the actual addresses.

To solve this problem, we need to calculate the kaslr offset (the
difference between the original symbol address and the actual address)
and phys_base, and adjust crash's symbol table accordingly. In the case
of a kdump dumpfile, this information is included in the header, but a
dumpfile created by virsh dump does not contain it.

This patch calculates the kaslr offset and phys_base to solve this
problem. Please see the comment in calc_kaslr_offset() for the details
of the idea. The basic idea is to get registers (IDTR and CR3) from the
dump header and to calculate kaslr_offset/phys_base from them. Note that
this patch works only on x86_64 and only for "virsh dump --memory-only".
A short illustrative sketch of the calculation is included after the
diffstat below.

Signed-off-by: Takao Indoh <indou.takao at jp.fujitsu.com>
---
 defs.h    |   7 +
 netdump.c | 502 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 netdump.h |   1 +
 symbols.c |  36 +++++
 x86_64.c  |   9 ++
 5 files changed, 555 insertions(+)
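
For reference (illustrative only, not part of the patch), here is a minimal,
self-contained sketch of the arithmetic that calc_kaslr_offset() in the diff
below performs. The function name, parameters, and the numbers in main() are
hypothetical stand-ins for the values crash reads from the dump (the vector 0
handler address and the physical address of the IDT) and from the vmlinux
symbol table.

#include <stdio.h>

/*
 * Sketch of the kaslr_offset/phys_base calculation.  All inputs are
 * hypothetical example values, not data read from a real dump.
 */
static void
sketch_kaslr_calc(unsigned long divide_error_vmcore,  /* vector 0 handler in dump */
		  unsigned long divide_error_vmlinux, /* same symbol in vmlinux   */
		  unsigned long idtr_paddr,           /* physical address of IDT  */
		  unsigned long idt_table_vmlinux,    /* idt_table in vmlinux     */
		  unsigned long start_kernel_map,     /* __START_KERNEL_map       */
		  unsigned long *kaslr_offset,
		  unsigned long *phys_base)
{
	/* kaslr_offset = actual (randomized) address - address in vmlinux */
	*kaslr_offset = divide_error_vmcore - divide_error_vmlinux;

	/*
	 * phys_base follows from the mapping of the kernel text region:
	 * paddr = vaddr + kaslr_offset - __START_KERNEL_map + phys_base
	 */
	*phys_base = idtr_paddr -
		(idt_table_vmlinux + *kaslr_offset - start_kernel_map);
}

int
main(void)
{
	unsigned long kaslr_offset, phys_base;

	/* Made-up numbers, chosen only to be mutually consistent. */
	sketch_kaslr_calc(0xffffffffa3e00a00UL,	/* divide_error in the dump */
			  0xffffffff81e00a00UL,	/* divide_error in vmlinux  */
			  0x25000000UL,		/* phys addr of idt_table   */
			  0xffffffff82000000UL,	/* idt_table in vmlinux     */
			  0xffffffff80000000UL,	/* __START_KERNEL_map       */
			  &kaslr_offset, &phys_base);

	printf("kaslr_offset = %lx\n", kaslr_offset);	/* prints 22000000 */
	printf("phys_base    = %lx\n", phys_base);	/* prints 1000000  */
	return 0;
}

With these example inputs the kernel image was shifted up by 0x22000000 bytes
and loaded at physical address 0x1000000, which is what the two printf lines
show.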

diff --git a/defs.h b/defs.h
index cddca93..558ab7b 100644
--- a/defs.h
+++ b/defs.h
@@ -2585,6 +2585,9 @@ struct symbol_table_data {
 	ulong first_section_start;
 	ulong last_section_end;
 	ulong _stext_vmlinux;
+	ulong divide_error_vmlinux;
+	ulong idt_table_vmlinux;
+	ulong saved_command_line_vmlinux;
 	struct downsized downsized;
 };
 
@@ -6125,6 +6128,8 @@ int get_netdump_arch(void);
 int exist_regs_in_elf_notes(struct task_context *);
 void *get_regs_from_elf_notes(struct task_context *);
 void map_cpus_to_prstatus(void);
+int kdump_phys_base(ulong *);
+int kdump_set_phys_base(ulong);
 int arm_kdump_phys_base(ulong *);
 int is_proc_kcore(char *, ulong);
 int proc_kcore_init(FILE *);
@@ -6138,6 +6143,8 @@ void display_ELF_note(int, int, void *, FILE *);
 void *netdump_get_prstatus_percpu(int);
 #define PRSTATUS_NOTE (1)
 #define QEMU_NOTE     (2)
+int vmcore_kaslr_check(void);
+int calc_kaslr_offset(ulong *kaslr_offset, ulong *phys_base);
 
 /*
  * ramdump.c
diff --git a/netdump.c b/netdump.c
index 0772e02..b03ba13 100644
--- a/netdump.c
+++ b/netdump.c
@@ -4694,3 +4694,505 @@ error(INFO, "%s: backup region is used: %llx\n", typename, backup_offset + total
 error:
 	error(WARNING, "failed to init kexec backup region\n");
 }
+
+int
+vmcore_kaslr_check(void)
+{
+	if (!VMCORE_VALID() || !(pc->flags2 & QEMU_MEM_DUMP_ELF))
+		return FALSE;
+
+	/* If vmcore has QEMU note, need to calculate kaslr offset */
+	if (nd->num_qemu_notes)
+		return TRUE;
+	else
+		return FALSE;
+}
+
+QEMUCPUState *
+get_qemucpustate(int cpu)
+{
+	if (cpu >= nd->num_qemu_notes) {
+		if (CRASHDEBUG(1))
+			error(INFO,
+			    "Invalid index for QEMU Note: %d (>= %d)\n",
+			    cpu, nd->num_qemu_notes);
+		return NULL;
+	}
+
+	if (!nd->elf64 || (nd->elf64->e_machine != EM_X86_64)) {
+		if (CRASHDEBUG(1))
+			error(INFO, "Only x86_64 is supported.\n");
+		return NULL;
+	}
+
+	return (QEMUCPUState *)nd->nt_qemu_percpu[cpu];
+}
+
+int kdump_phys_base(ulong *phys_base)
+{
+	if (!vmcore_kaslr_check())
+		return FALSE;
+
+	if (nd->phys_base) {
+		*phys_base = nd->phys_base;
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+int kdump_set_phys_base(ulong phys_base)
+{
+	if (!vmcore_kaslr_check())
+		return FALSE;
+
+	nd->phys_base = phys_base;
+
+	return TRUE;
+}
+
+/*
+ * Get the address of the vector 0 interrupt handler (Divide Error) from
+ * the Interrupt Descriptor Table.
+ */
+static ulong
+get_vec0_addr(ulong idtr)
+{
+	ulong buf[16];
+	ulong *ip, i1, i2, addr;
+
+	readmem(idtr, PHYSADDR, buf, sizeof(buf), "idt_table", FAULT_ON_ERROR);
+	ip = (ulong *)buf;
+
+	i1 = *ip;
+	i2 = *(ip+1);
+
+	i2 <<= 32;
+	addr = i2 & 0xffffffff00000000;
+	addr |= (i1 & 0xffff);
+	i1 >>= 32;
+	addr |= (i1 & 0xffff0000);
+
+	return addr;
+}
+
+static ulong
+qemu_get_idtr(void)
+{
+	QEMUCPUState *cpustat;
+
+	cpustat = get_qemucpustate(0);
+	return cpustat->idt.base;
+}
+
+static ulong
+qemu_get_cr3(void)
+{
+	QEMUCPUState *cpustat;
+
+	cpustat = get_qemucpustate(0);
+	return cpustat->cr[3];
+}
+
+/*
+ * Parse a string of [size[KMG]@]offset[KMG]
+ * Import from Linux kernel(lib/cmdline.c)
+ */
+static ulong memparse(char *ptr, char **retptr)
+{
+	char *endptr;
+
+	unsigned long long ret = strtoull(ptr, &endptr, 0);
+
+	switch (*endptr) {
+	case 'E':
+	case 'e':
+		ret <<= 10;
+	case 'P':
+	case 'p':
+		ret <<= 10;
+	case 'T':
+	case 't':
+		ret <<= 10;
+	case 'G':
+	case 'g':
+		ret <<= 10;
+	case 'M':
+	case 'm':
+		ret <<= 10;
+	case 'K':
+	case 'k':
+		ret <<= 10;
+		endptr++;
+	default:
+		break;
+	}
+
+	if (retptr)
+		*retptr = endptr;
+
+	return ret;
+}
+
+/*
+ * Find "elfcorehdr=" in the kernel boot parameters and return the address
+ * of elfcorehdr.
+ */
+static ulong
+get_elfcorehdr(ulong cr3, ulong kaslr_offset)
+{
+	char cmdline[BUFSIZE], *ptr;
+	ulong cmdline_vaddr;
+	ulong cmdline_paddr;
+	ulong buf_vaddr, buf_paddr;
+	int i;
+	char elfcorehdr_val[16], *end;
+	ulong elfcorehdr_addr = 0, elfcorehdr_size = 0;
+	int verbose = CRASHDEBUG(1)? 1: 0;
+
+	cmdline_vaddr = st->saved_command_line_vmlinux + kaslr_offset;
+	if (!x86_64_kvtop_pagetable(cmdline_vaddr, &cmdline_paddr, verbose))
+		return 0;
+
+	if (CRASHDEBUG(1)) {
+		fprintf(fp, "cmdline vaddr=%lx\n", cmdline_vaddr);
+		fprintf(fp, "cmdline paddr=%lx\n", cmdline_paddr);
+	}
+
+	if (!readmem(cmdline_paddr, PHYSADDR, &buf_vaddr, sizeof(ulong),
+		     "saved_command_line", FAULT_ON_ERROR))
+		return 0;
+
+	if (!x86_64_kvtop_pagetable(buf_vaddr, &buf_paddr, verbose))
+		return 0;
+
+	if (CRASHDEBUG(1)) {
+		fprintf(fp, "cmdline buffer vaddr=%lx\n", buf_vaddr);
+		fprintf(fp, "cmdline buffer paddr=%lx\n", buf_paddr);
+	}
+
+	memset(cmdline, 0, BUFSIZE);
+	if (!readmem(buf_paddr, PHYSADDR, cmdline, BUFSIZE,
+		     "saved_command_line", FAULT_ON_ERROR))
+		return 0;
+
+	ptr = strstr(cmdline, "elfcorehdr=");
+	if (!ptr)
+		return 0;
+
+	ptr += strlen("elfcorehdr=");
+	memset(elfcorehdr_val, 0, sizeof(elfcorehdr_val));
+	sscanf(ptr, "%s", elfcorehdr_val);
+
+	if (CRASHDEBUG(1)) {
+		fprintf(fp, "2nd kernel detected\n");
+		fprintf(fp, "elfcorehdr=%s\n", elfcorehdr_val);
+	}
+
+	/*  elfcorehdr=[size[KMG]@]offset[KMG] */
+	elfcorehdr_addr = memparse(elfcorehdr_val, &end);
+	if (*end == '@') {
+		elfcorehdr_size = elfcorehdr_addr;
+		elfcorehdr_addr = memparse(end + 1, &end);
+	}
+
+	if (CRASHDEBUG(1)) {
+		fprintf(fp, "elfcorehdr_addr=%lx\n", elfcorehdr_addr);
+		fprintf(fp, "elfcorehdr_size=%lx\n", elfcorehdr_size);
+	}
+
+	return elfcorehdr_addr;
+}
+
+/*
+ * Get vmcoreinfo from elfcorehdr.
+ * Parts of this code are imported from the Linux kernel (fs/proc/vmcore.c).
+ */
+static int
+get_vmcoreinfo(ulong elfcorehdr, ulong *addr, int *len)
+{
+	unsigned char e_ident[EI_NIDENT];
+	Elf64_Ehdr ehdr;
+	Elf64_Phdr phdr;
+	Elf64_Nhdr nhdr;
+	ulong ptr;
+	ulong nhdr_offset = 0;
+	int i;
+
+	if (!readmem(elfcorehdr, PHYSADDR, e_ident, EI_NIDENT,
+		     "EI_NIDENT", FAULT_ON_ERROR))
+		return FALSE;
+
+	if (e_ident[EI_CLASS] != ELFCLASS64) {
+		error(INFO, "Only ELFCLASS64 is supported\n");
+		return FALSE;
+	}
+
+	if (!readmem(elfcorehdr, PHYSADDR, &ehdr, sizeof(ehdr),
+			"Elf64_Ehdr", FAULT_ON_ERROR))
+		return FALSE;
+
+	/* Sanity Check */
+	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
+		(ehdr.e_type != ET_CORE) ||
+		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
+		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
+		ehdr.e_version != EV_CURRENT ||
+		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
+		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
+		ehdr.e_phnum == 0) {
+		error(INFO, "Invalid elf header\n");
+		return FALSE;
+	}
+
+	ptr = elfcorehdr + sizeof(ehdr);
+	for (i = 0; i < ehdr.e_phnum; i++) {
+		ulong offset;
+		char name[16];
+
+		if (!readmem(ptr, PHYSADDR, &phdr, sizeof(phdr),
+				"Elf64_Phdr", FAULT_ON_ERROR))
+			return FALSE;
+
+		ptr += sizeof(phdr);
+		if (phdr.p_type != PT_NOTE)
+			continue;
+
+		offset = phdr.p_offset;
+		if (!readmem(offset, PHYSADDR, &nhdr, sizeof(nhdr),
+				"Elf64_Nhdr", FAULT_ON_ERROR))
+			return FALSE;
+
+		offset += DIV_ROUND_UP(sizeof(Elf64_Nhdr), sizeof(Elf64_Word))*
+			  sizeof(Elf64_Word);
+		memset(name, 0, sizeof(name));
+		if (!readmem(offset, PHYSADDR, name, sizeof(name),
+				"Elf64_Nhdr name", FAULT_ON_ERROR))
+			return FALSE;
+
+		if (!strcmp(name, "VMCOREINFO")) {
+			nhdr_offset = offset;
+			break;
+		}
+	}
+
+	if (!nhdr_offset)
+		return FALSE;
+
+	*addr = nhdr_offset +
+		DIV_ROUND_UP(nhdr.n_namesz, sizeof(Elf64_Word))*
+		sizeof(Elf64_Word);
+	*len = nhdr.n_descsz;
+
+	if (CRASHDEBUG(1)) {
+		fprintf(fp, "vmcoreinfo addr=%lx\n", *addr);
+		fprintf(fp, "vmcoreinfo len=%d\n", *len);
+	}
+
+	return TRUE;
+}
+
+/*
+ * Check if current kaslr_offset/phys_base is for 1st kernel or 2nd kernel.
+ * If we are in 2nd kernel, get kaslr_offset/phys_base from vmcoreinfo.
+ *
+ * 1. Get the command line and try to retrieve the "elfcorehdr=" boot parameter
+ * 2. If "elfcorehdr=" is not found in the command line, we are in the 1st
+ *    kernel. There is nothing to do.
+ * 3. If "elfcorehdr=" is found, we are in the 2nd kernel. Find vmcoreinfo
+ *    using "elfcorehdr=" and retrieve kaslr_offset/phys_base from it.
+ */
+int
+get_kaslr_offset_from_vmcoreinfo(ulong cr3, ulong orig_kaslr_offset,
+		                 ulong *kaslr_offset, ulong *phys_base)
+{
+	ulong elfcorehdr_addr = 0;
+	ulong vmcoreinfo_addr;
+	int vmcoreinfo_len;
+	char *buf, *pos, *endptr;
+
+	/* Find "elfcorehdr=" in the kernel boot parameter */
+	elfcorehdr_addr = get_elfcorehdr(cr3, orig_kaslr_offset);
+	if (!elfcorehdr_addr)
+		return FALSE;
+
+	/* Get vmcoreinfo from the address of "elfcorehdr=" */
+	if (!get_vmcoreinfo(elfcorehdr_addr, &vmcoreinfo_addr, &vmcoreinfo_len))
+		return FALSE;
+
+	if (CRASHDEBUG(1))
+		fprintf(fp, "Find vmcoreinfo in kdump memory\n");
+
+	buf = GETBUF(vmcoreinfo_len);
+	if (!readmem(vmcoreinfo_addr, PHYSADDR, buf, vmcoreinfo_len,
+			"vmcoreinfo", FAULT_ON_ERROR))
+		goto free_and_return;
+
+	/* Get phys_base from vmcoreinfo */
+	pos = strstr(buf, "NUMBER(phys_base)=");
+	if (!pos)
+		goto free_and_return;
+	*phys_base = strtoull(pos + strlen("NUMBER(phys_base)="), &endptr, 0);
+
+	/* Get kaslr_offset from vmcoreinfo */
+	pos = strstr(buf, "KERNELOFFSET=");
+	if (!pos)
+		goto free_and_return;
+	*kaslr_offset = strtoull(pos + strlen("KERNELOFFSET="), &endptr, 16);
+
+	FREEBUF(buf);
+	return TRUE;
+
+free_and_return:
+	FREEBUF(buf);
+	return FALSE;
+}
+
+/*
+ * Calculate kaslr_offset and phys_base
+ *
+ * kaslr_offset:
+ *   The difference between the original address in System.map or vmlinux
+ *   and the actual address chosen randomly by the kaslr feature. To be
+ *   more accurate, kaslr_offset = actual address - original address.
+ *
+ * phys_base:
+ *   Physical address where the kernel is placed. In other words, it is the
+ *   physical address of __START_KERNEL_map. This is also decided randomly
+ *   by kaslr.
+ *
+ * kaslr offset and phys_base are calculated as follows:
+ *
+ * kaslr_offset:
+ * 1) Get the IDTR and CR3 values from the dump header.
+ * 2) Get the virtual address of the IDT from the IDTR value
+ *    --- (A)
+ * 3) Translate (A) to a physical address using CR3, which points to the
+ *    top of the page table.
+ *    --- (B)
+ * 4) Get the address of the vector 0 (Divide Error) interrupt handler
+ *    from the IDT, which is pointed to by (B).
+ *    --- (C)
+ * 5) Get the address of the symbol "divide_error" from vmlinux
+ *    --- (D)
+ *
+ * Now we have two addresses:
+ * (C)-> Actual address of "divide_error"
+ * (D)-> Original address of "divide_error" in the vmlinux
+ *
+ * kaslr_offset can be calculated as the difference between these two
+ * values.
+ *
+ * phys_base:
+ * 1) Get the virtual address of the IDT from vmlinux
+ *    --- (E)
+ *
+ * So phys_base can be calculated using the relationship of the directly
+ * mapped address:
+ *
+ * phys_base =
+ *   Physical address(B) -
+ *   (Virtual address(E) + kaslr_offset - __START_KERNEL_map)
+ *
+ * Note that address (A) cannot be used instead of (E) because (A) is not
+ * a directly mapped address; it is a fixmap address.
+ *
+ * This solution works in almost every case, but does not work in the
+ * following cases.
+ *
+ * 1) If the dump is captured at an early stage of kernel boot, IDTR points
+ *    to the early IDT table (early_idts) instead of the normal IDT
+ *    (idt_table).
+ * 2) If the dump is captured while kdump is working, IDTR points to the
+ *    IDT table of the 2nd kernel, not the 1st kernel.
+ *
+ * The current implementation does not support case 1) and needs future
+ * enhancement. For case 2), get kaslr_offset and phys_base as follows.
+ *
+ * 1) Get kaslr_offset and phys_base using the above solution.
+ * 2) Get the kernel boot parameters from "saved_command_line"
+ * 3) If "elfcorehdr=" is not included in the boot parameters, we are in
+ *    the 1st kernel and there is nothing more to do.
+ * 4) If "elfcorehdr=" is included in the boot parameters, we are in the
+ *    2nd kernel. Retrieve vmcoreinfo from the address given by
+ *    "elfcorehdr=" and get kaslr_offset and phys_base from it.
+ */
+int
+calc_kaslr_offset(ulong *kaslr_offset, ulong *phys_base)
+{
+	uint64_t idtr = 0, cr3 = 0, idtr_paddr;
+	ulong divide_error_vmcore;
+	ulong kaslr_offset_kdump, phys_base_kdump;
+	int ret = FALSE;
+	int verbose = CRASHDEBUG(1)? 1: 0;
+
+	if (!machine_type("X86_64"))
+		return FALSE;
+
+	if (vmcore_kaslr_check()) {
+		idtr = qemu_get_idtr();
+		cr3 = qemu_get_cr3();
+	} else
+		return FALSE;
+
+	/*
+	 * Set up for kvtop.
+	 *
+	 * calc_kaslr_offset() is called before machdep_init(PRE_GDB), so some
+	 * variables are not initialized yet. Set them up here so that
+	 * x86_64_kvtop_pagetable() can be called.
+	 *
+	 * TODO: XEN and 5-level paging are not supported.
+	 */
+	vt->kernel_pgd[0] = cr3;
+	machdep->machspec->last_pml4_read = vt->kernel_pgd[0];
+	machdep->machspec->physical_mask_shift = __PHYSICAL_MASK_SHIFT_2_6;
+	machdep->machspec->pgdir_shift = PGDIR_SHIFT;
+	if (!readmem(cr3, PHYSADDR, machdep->machspec->pml4, PAGESIZE(),
+			"cr3", FAULT_ON_ERROR))
+		goto quit;
+
+	/* Convert virtual address of IDT table to physical address */
+	if (!x86_64_kvtop_pagetable(idtr, &idtr_paddr, verbose))
+		goto quit;
+
+	/* Now we can calculate kaslr_offset and phys_base */
+	divide_error_vmcore = get_vec0_addr(idtr_paddr);
+	*kaslr_offset = divide_error_vmcore - st->divide_error_vmlinux;
+	*phys_base = idtr_paddr -
+		(st->idt_table_vmlinux + *kaslr_offset - __START_KERNEL_map);
+
+	if (CRASHDEBUG(1)) {
+		fprintf(fp, "calc_kaslr_offset: idtr=%lx\n", idtr);
+		fprintf(fp, "calc_kaslr_offset: cr3=%lx\n", cr3);
+		fprintf(fp, "calc_kaslr_offset: idtr(phys)=%lx\n", idtr_paddr);
+		fprintf(fp, "calc_kaslr_offset: divide_error(vmlinux): %lx\n",
+			st->divide_error_vmlinux);
+		fprintf(fp, "calc_kaslr_offset: divide_error(vmcore): %lx\n",
+			divide_error_vmcore);
+	}
+
+	/*
+	 * Check if current kaslr_offset/phys_base is for 1st kernel or 2nd
+	 * kernel. If we are in 2nd kernel, get kaslr_offset/phys_base
+	 * from vmcoreinfo
+	 */
+	if (get_kaslr_offset_from_vmcoreinfo(
+		cr3, *kaslr_offset, &kaslr_offset_kdump, &phys_base_kdump)) {
+		*kaslr_offset = kaslr_offset_kdump;
+		*phys_base = phys_base_kdump;
+	}
+
+	if (CRASHDEBUG(1)) {
+		fprintf(fp, "calc_kaslr_offset: kaslr_offset=%lx\n",
+			*kaslr_offset);
+		fprintf(fp, "calc_kaslr_offset: phys_base=%lx\n", *phys_base);
+	}
+
+	ret = TRUE;
+quit:
+	vt->kernel_pgd[0] = 0;
+	machdep->machspec->last_pml4_read = 0;
+	return ret;
+}
+
diff --git a/netdump.h b/netdump.h
index ec6691c..91ae428 100644
--- a/netdump.h
+++ b/netdump.h
@@ -78,6 +78,7 @@ struct vmcore_data {
 	ulong backup_src_size;
 	ulonglong backup_offset;
 	ulong relocate;
+	ulong phys_base;
 };
 
 #define DUMP_ELF_INCOMPLETE  0x1   /* dumpfile is incomplete */
diff --git a/symbols.c b/symbols.c
index c793760..9e0c9df 100644
--- a/symbols.c
+++ b/symbols.c
@@ -624,6 +624,9 @@ kaslr_init(void)
 			st->_stext_vmlinux = UNINITIALIZED;
 		}
 	}
+
+	if (vmcore_kaslr_check())
+		kt->flags2 |= KASLR_CHECK;
 }
 
 /*
@@ -637,6 +640,22 @@ derive_kaslr_offset(bfd *abfd, int dynamic, bfd_byte *start, bfd_byte *end,
 	unsigned long relocate;
 	ulong _stext_relocated;
 
+	if (vmcore_kaslr_check()) {
+		ulong kaslr_offset = 0, phys_base = 0;
+
+		calc_kaslr_offset(&kaslr_offset, &phys_base);
+
+		if (kaslr_offset) {
+			kt->relocate = kaslr_offset * -1;
+			kt->flags |= RELOC_SET;
+		}
+
+		if (phys_base)
+			kdump_set_phys_base(phys_base);
+
+		return;
+	}
+
 	if (ACTIVE()) {
 		_stext_relocated = symbol_value_from_proc_kallsyms("_stext");
 		if (_stext_relocated == BADVAL)
@@ -12236,6 +12255,23 @@ numeric_forward(const void *P_x, const void *P_y)
 		}
 	}
 
+	if (vmcore_kaslr_check()) {
+		if (STREQ(x->name, "divide_error"))
+			st->divide_error_vmlinux = valueof(x);
+		else if (STREQ(y->name, "divide_error"))
+			st->divide_error_vmlinux = valueof(y);
+
+		if (STREQ(x->name, "idt_table"))
+			st->idt_table_vmlinux = valueof(x);
+		else if (STREQ(y->name, "idt_table"))
+			st->idt_table_vmlinux = valueof(y);
+
+		if (STREQ(x->name, "saved_command_line"))
+			st->saved_command_line_vmlinux = valueof(x);
+		else if (STREQ(y->name, "saved_command_line"))
+			st->saved_command_line_vmlinux = valueof(y);
+	}
+
   	xs = bfd_get_section(x);
   	ys = bfd_get_section(y);
 
diff --git a/x86_64.c b/x86_64.c
index fc48425..8fc79b6 100644
--- a/x86_64.c
+++ b/x86_64.c
@@ -6377,6 +6377,15 @@ x86_64_calc_phys_base(void)
 		return;
 	}
 
+	if (KDUMP_DUMPFILE()) {
+		if (kdump_phys_base(&phys_base)) {
+			machdep->machspec->phys_base = phys_base;
+			if (CRASHDEBUG(1))
+				fprintf(fp, "kdump: phys_base: %lx\n",
+					phys_base);
+		}
+	}
+
 	if (SADUMP_DUMPFILE()) {
 		if (sadump_phys_base(&phys_base)) {
 			machdep->machspec->phys_base = phys_base;
-- 
2.9.5




