xref: /cloud-hypervisor/vmm/src/coredump.rs (revision ed63b352d1ebf70f36c7d36a0d6b52fc96186581)
// Copyright © 2022 ZTE Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

use std::fs::File;
use std::io::Write;

#[cfg(target_arch = "x86_64")]
use hypervisor::arch::x86::{DescriptorTable, SegmentRegister};
use linux_loader::elf;
use thiserror::Error;
use vm_memory::ByteValued;

#[derive(Clone)]
pub struct CoredumpMemoryRegion {
    pub mem_offset_in_elf: u64,
    pub mem_size: u64,
}

#[derive(Clone)]
pub struct CoredumpMemoryRegions {
    pub ram_maps: std::collections::BTreeMap<u64, CoredumpMemoryRegion>,
}

/// State shared by the ELF coredump writers: the size of the note section, the
/// number of program headers, the offset at which guest memory starts in the
/// output file, the guest RAM layout, and the destination file itself.
#[derive(Default)]
pub struct DumpState {
    pub elf_note_size: isize,
    pub elf_phdr_num: u16,
    pub elf_sh_info: u32,
    pub mem_offset: u64,
    pub mem_info: Option<CoredumpMemoryRegions>,
    pub file: Option<File>,
}

#[derive(Error, Debug)]
pub enum GuestDebuggableError {
    #[error("Failed to take coredump")]
    Coredump(#[source] anyhow::Error),
    #[error("Failed to write coredump file")]
    CoredumpFile(#[source] std::io::Error),
    #[error("Failed to pause")]
    Pause(#[source] vm_migration::MigratableError),
    #[error("Failed to resume")]
    Resume(#[source] vm_migration::MigratableError),
}

pub trait GuestDebuggable: vm_migration::Pausable {
    fn coredump(
        &mut self,
        _destination_url: &str,
    ) -> std::result::Result<(), GuestDebuggableError> {
        Ok(())
    }
}
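
// A minimal sketch (not part of the upstream file) of how a caller might drive
// a guest coredump through this trait: pause the guest so vCPU and memory state
// are stable, write the dump, then resume. The helper name `coredump_paused`
// and its URL parameter are illustrative assumptions, not an existing API.
pub fn coredump_paused<T: GuestDebuggable>(
    vm: &mut T,
    destination_url: &str,
) -> std::result::Result<(), GuestDebuggableError> {
    vm.pause().map_err(GuestDebuggableError::Pause)?;
    let result = vm.coredump(destination_url);
    vm.resume().map_err(GuestDebuggableError::Resume)?;
    result
}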

#[repr(C)]
#[derive(Default, Copy, Clone)]
pub struct X86_64UserRegs {
    /// r15, r14, r13, r12, rbp, rbx, r11, r10;
    pub regs1: [u64; 8],
    /// r9, r8, rax, rcx, rdx, rsi, rdi, orig_rax;
    pub regs2: [u64; 8],
    pub rip: u64,
    pub cs: u64,
    pub eflags: u64,
    pub rsp: u64,
    pub ss: u64,
    pub fs_base: u64,
    pub gs_base: u64,
    pub ds: u64,
    pub es: u64,
    pub fs: u64,
    pub gs: u64,
}

// SAFETY: This is just a series of bytes
unsafe impl ByteValued for X86_64UserRegs {}

#[repr(C)]
pub struct X86_64ElfPrStatus {
    pub pad1: [u8; 32],
    pub pid: u32,
    pub pads2: [u8; 76],
    pub regs: X86_64UserRegs,
    pub pad3: [u8; 8],
}

#[repr(C)]
#[derive(Default, Copy, Clone)]
pub struct CpuSegment {
    pub selector: u32,
    pub limit: u32,
    pub flags: u32,
    pub pad: u32,
    pub base: u64,
}

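// Bit positions of the attribute fields in the upper 32 bits of an x86 segment
// descriptor (type, S, DPL, P, AVL, L, D/B and G). The `flags` word emitted in
// the notes below packs the segment attributes using this same layout.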
const DESC_TYPE_SHIFT: u32 = 8;
const DESC_S_SHIFT: u32 = 12;
const DESC_DPL_SHIFT: u32 = 13;
const DESC_P_SHIFT: u32 = 15;
const DESC_P_MASK: u32 = 1 << DESC_P_SHIFT;
const DESC_AVL_SHIFT: u32 = 20;
const DESC_AVL_MASK: u32 = 1 << DESC_AVL_SHIFT;
const DESC_L_SHIFT: u32 = 21;
const DESC_B_SHIFT: u32 = 22;
const DESC_S_MASK: u32 = 1 << DESC_S_SHIFT;
const DESC_G_SHIFT: u32 = 23;
const DESC_G_MASK: u32 = 1 << DESC_G_SHIFT;

impl CpuSegment {
    pub fn new(reg: SegmentRegister) -> Self {
        let p_mask = if (reg.present > 0) && (reg.unusable == 0) {
            DESC_P_MASK
        } else {
            0
        };
        let flags = ((reg.type_ as u32) << DESC_TYPE_SHIFT)
            | p_mask
            | ((reg.dpl as u32) << DESC_DPL_SHIFT)
            | ((reg.db as u32) << DESC_B_SHIFT)
            | ((reg.s as u32) * DESC_S_MASK)
            | ((reg.l as u32) << DESC_L_SHIFT)
            | ((reg.g as u32) * DESC_G_MASK)
            | ((reg.avl as u32) * DESC_AVL_MASK);

        CpuSegment {
            selector: reg.selector as u32,
            limit: reg.limit,
            flags,
            pad: 0,
            base: reg.base,
        }
    }

    pub fn new_from_table(reg: DescriptorTable) -> Self {
        CpuSegment {
            selector: 0,
            limit: reg.limit as u32,
            flags: 0,
            pad: 0,
            base: reg.base,
        }
    }
}
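
// Worked example (added for illustration): for a typical 64-bit code segment
// passed to `CpuSegment::new`, with type = 0xb, s = 1, dpl = 0, present = 1,
// db = 0, l = 1, g = 1 and avl = 0, the flags word evaluates to
// 0xb00 | 0x8000 | 0x1000 | 0x200000 | 0x800000 = 0x00a0_9b00.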

#[repr(C)]
#[derive(Default, Copy, Clone)]
pub struct CpuState {
    pub version: u32,
    pub size: u32,
    /// rax, rbx, rcx, rdx, rsi, rdi, rsp, rbp
    pub regs1: [u64; 8],
    /// r8, r9, r10, r11, r12, r13, r14, r15
    pub regs2: [u64; 8],
    pub rip: u64,
    pub rflags: u64,
    pub cs: CpuSegment,
    pub ds: CpuSegment,
    pub es: CpuSegment,
    pub fs: CpuSegment,
    pub gs: CpuSegment,
    pub ss: CpuSegment,
    pub ldt: CpuSegment,
    pub tr: CpuSegment,
    pub gdt: CpuSegment,
    pub idt: CpuSegment,
    pub cr: [u64; 5],
    pub kernel_gs_base: u64,
}

// SAFETY: This is just a series of bytes
unsafe impl ByteValued for CpuState {}

pub enum NoteDescType {
    Elf = 0,
    Vmm = 1,
    ElfAndVmm = 2,
}

// Note name size for "CORE" and "QEMU" notes, including the trailing NUL byte.
pub const COREDUMP_NAME_SIZE: u32 = 5;
pub const NT_PRSTATUS: u32 = 1;

/// Core file.
const ET_CORE: u16 = 4;
/// 64-bit object.
const ELFCLASS64: u8 = 2;
/// Current ELF version.
const EV_CURRENT: u8 = 1;
/// AMD x86-64 architecture
const EM_X86_64: u16 = 62;

pub trait Elf64Writable {
    fn write_header(
        &mut self,
        dump_state: &DumpState,
    ) -> std::result::Result<(), GuestDebuggableError> {
        let e_ident = [
            elf::ELFMAG0 as u8, // magic
            elf::ELFMAG1,
            elf::ELFMAG2,
            elf::ELFMAG3,
            ELFCLASS64,             // class
            elf::ELFDATA2LSB as u8, // data
            EV_CURRENT,             // version
            0,                      // os_abi
            0,                      // abi_version
            0,                      // padding
            0,
            0,
            0,
            0,
            0,
            0,
        ];
        let elf64_ehdr_size = std::mem::size_of::<elf::Elf64_Ehdr>();
        let elf64_phdr_size = std::mem::size_of::<elf::Elf64_Phdr>();
        let mut elf64_ehdr = elf::Elf64_Ehdr {
            e_ident,
            e_type: ET_CORE,
            e_machine: EM_X86_64,
            e_version: EV_CURRENT as u32,
            e_entry: 0,
            e_phoff: elf64_ehdr_size as u64,
            e_shoff: 0,
            e_flags: 0,
            e_ehsize: 0,
            e_phentsize: elf64_phdr_size as u16,
            e_phnum: dump_state.elf_phdr_num,
            e_shentsize: 0,
            e_shnum: 0,
            e_shstrndx: 0,
        };
        elf64_ehdr.e_ehsize = std::mem::size_of_val(&elf64_ehdr) as u16;

        // The caller is expected to have opened the destination file before
        // invoking any of the writers.
        let mut coredump_file = dump_state.file.as_ref().unwrap();
        let bytes: &[u8] = elf64_ehdr.as_slice();
        // Use write_all() so a short write cannot silently truncate the header.
        coredump_file
            .write_all(bytes)
            .map_err(GuestDebuggableError::CoredumpFile)?;

        Ok(())
    }

    fn write_note(
        &mut self,
        dump_state: &DumpState,
    ) -> std::result::Result<(), GuestDebuggableError> {
        let begin = dump_state.mem_offset - dump_state.elf_note_size as u64;
        let elf64_phdr = elf::Elf64_Phdr {
            p_type: elf::PT_NOTE,
            p_flags: 0,
            p_offset: begin,
            p_vaddr: 0,
            p_paddr: 0,
            p_filesz: dump_state.elf_note_size as u64,
            p_memsz: dump_state.elf_note_size as u64,
            p_align: 0,
        };

        let mut coredump_file = dump_state.file.as_ref().unwrap();
        let bytes: &[u8] = elf64_phdr.as_slice();
        coredump_file
            .write_all(bytes)
            .map_err(GuestDebuggableError::CoredumpFile)?;

        Ok(())
    }

    fn write_load(
        &mut self,
        offset: u64,
        phys_addr: u64,
        length: u64,
        virt_addr: u64,
        dump_state: &DumpState,
    ) -> std::result::Result<(), GuestDebuggableError> {
        let elf64_load = elf::Elf64_Phdr {
            p_type: elf::PT_LOAD,
            p_flags: 0,
            p_offset: offset,
            p_vaddr: virt_addr,
            p_paddr: phys_addr,
            p_filesz: length,
            p_memsz: length,
            p_align: 0,
        };

        let mut coredump_file = dump_state.file.as_ref().unwrap();
        let bytes: &[u8] = elf64_load.as_slice();
        coredump_file
            .write_all(bytes)
            .map_err(GuestDebuggableError::CoredumpFile)?;

        Ok(())
    }

    fn write_loads(
        &mut self,
        dump_state: &DumpState,
    ) -> std::result::Result<(), GuestDebuggableError> {
        let mem_info = dump_state.mem_info.as_ref().unwrap();

        for (gpa, load) in &mem_info.ram_maps {
            self.write_load(load.mem_offset_in_elf, *gpa, load.mem_size, 0, dump_state)?;
        }

        Ok(())
    }

    fn elf_note_size(&self, hdr_size: u32, name_size: u32, desc_size: u32) -> u32 {
        (hdr_size.div_ceil(4) + name_size.div_ceil(4) + desc_size.div_ceil(4)) * 4
    }
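    // Worked example (illustrative): elf_note_size(12, 5, 6) yields
    // (3 + 2 + 2) * 4 = 28, since each of the header, name and descriptor
    // lengths is rounded up to a 4-byte boundary, as ELF notes require.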

    fn get_note_size(&self, desc_type: NoteDescType, nr_cpus: u32) -> u32 {
        let note_head_size = std::mem::size_of::<elf::Elf64_Nhdr>() as u32;
        let elf_desc_size = std::mem::size_of::<X86_64ElfPrStatus>() as u32;
        let cpu_state_desc_size = std::mem::size_of::<CpuState>() as u32;

        let elf_note_size = self.elf_note_size(note_head_size, COREDUMP_NAME_SIZE, elf_desc_size);
        let vmm_note_size =
            self.elf_note_size(note_head_size, COREDUMP_NAME_SIZE, cpu_state_desc_size);

        match desc_type {
            NoteDescType::Elf => elf_note_size * nr_cpus,
            NoteDescType::Vmm => vmm_note_size * nr_cpus,
            NoteDescType::ElfAndVmm => (elf_note_size + vmm_note_size) * nr_cpus,
        }
    }
}
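
// A minimal sketch (not part of the upstream file) of how a caller might lay
// out the dump before using the writers above: the program-header table sits
// right after the ELF header, the PT_NOTE payload follows the program headers,
// and guest RAM is written from `mem_offset` onwards. The helper name
// `elf_layout_offsets` is illustrative only; the returned pair corresponds to
// `DumpState::elf_note_size` and `DumpState::mem_offset`.
pub fn elf_layout_offsets<W: Elf64Writable>(
    writer: &W,
    nr_cpus: u32,
    elf_phdr_num: u16,
) -> (isize, u64) {
    let ehdr_size = std::mem::size_of::<elf::Elf64_Ehdr>() as u64;
    let phdr_size = std::mem::size_of::<elf::Elf64_Phdr>() as u64;
    // One ELF prstatus note plus one VMM CPU-state note per vCPU.
    let note_size = writer.get_note_size(NoteDescType::ElfAndVmm, nr_cpus) as isize;
    // ELF header, then the program-header table, then the notes; the memory
    // payload starts immediately after.
    let mem_offset = ehdr_size + phdr_size * elf_phdr_num as u64 + note_size as u64;
    (note_size, mem_offset)
}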

pub trait CpuElf64Writable {
    fn cpu_write_elf64_note(
        &mut self,
        _dump_state: &DumpState,
    ) -> std::result::Result<(), GuestDebuggableError> {
        Ok(())
    }

    fn cpu_write_vmm_note(
        &mut self,
        _dump_state: &DumpState,
    ) -> std::result::Result<(), GuestDebuggableError> {
        Ok(())
    }
}
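
// Illustrative sanity checks added for this write-up; they are not part of the
// upstream file. They pin down the #[repr(C)] layouts that the coredump notes
// rely on, using only sizes that follow directly from the field definitions.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn x86_64_user_regs_size() {
        // 27 u64 fields laid out back to back: 27 * 8 = 216 bytes.
        assert_eq!(std::mem::size_of::<X86_64UserRegs>(), 216);
    }

    #[test]
    fn elf_prstatus_size() {
        // 32 + 4 + 76 bytes of padding/pid, the 216-byte register block, then
        // 8 trailing bytes: 336 bytes in total, with no implicit padding.
        assert_eq!(std::mem::size_of::<X86_64ElfPrStatus>(), 336);
    }
}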
350