// Copyright © 2022 ZTE Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

use std::fs::File;
use std::io::Write;

#[cfg(target_arch = "x86_64")]
use hypervisor::arch::x86::{DescriptorTable, SegmentRegister};
use linux_loader::elf;
use vm_memory::ByteValued;

#[derive(Clone)]
pub struct CoredumpMemoryRegion {
    pub mem_offset_in_elf: u64,
    pub mem_size: u64,
}

#[derive(Clone)]
pub struct CoredumpMemoryRegions {
    pub ram_maps: std::collections::BTreeMap<u64, CoredumpMemoryRegion>,
}

/// State tracked while writing a guest core dump.
#[derive(Default)]
pub struct DumpState {
    pub elf_note_size: isize,
    pub elf_phdr_num: u16,
    pub elf_sh_info: u32,
    pub mem_offset: u64,
    pub mem_info: Option<CoredumpMemoryRegions>,
    pub file: Option<File>,
}

#[derive(Debug)]
pub enum GuestDebuggableError {
    Coredump(anyhow::Error),
    CoredumpFile(std::io::Error),
    Pause(vm_migration::MigratableError),
    Resume(vm_migration::MigratableError),
}

pub trait GuestDebuggable: vm_migration::Pausable {
    fn coredump(
        &mut self,
        _destination_url: &str,
    ) -> std::result::Result<(), GuestDebuggableError> {
        Ok(())
    }
}

#[macro_export]
macro_rules! div_round_up {
    ($n:expr, $d:expr) => {
        ($n + $d - 1) / $d
    };
}

#[repr(C)]
#[derive(Default, Copy, Clone)]
pub struct X86_64UserRegs {
    /// r15, r14, r13, r12, rbp, rbx, r11, r10;
    pub regs1: [u64; 8],
    /// r9, r8, rax, rcx, rdx, rsi, rdi, orig_rax;
    pub regs2: [u64; 8],
    pub rip: u64,
    pub cs: u64,
    pub eflags: u64,
    pub rsp: u64,
    pub ss: u64,
    pub fs_base: u64,
    pub gs_base: u64,
    pub ds: u64,
    pub es: u64,
    pub fs: u64,
    pub gs: u64,
}

// SAFETY: This is just a series of bytes
unsafe impl ByteValued for X86_64UserRegs {}

#[repr(C)]
pub struct X86_64ElfPrStatus {
    pub pad1: [u8; 32],
    pub pid: u32,
    pub pads2: [u8; 76],
    pub regs: X86_64UserRegs,
    pub pad3: [u8; 8],
}

#[repr(C)]
#[derive(Default, Copy, Clone)]
pub struct CpuSegment {
    pub selector: u32,
    pub limit: u32,
    pub flags: u32,
    pub pad: u32,
    pub base: u64,
}

// Bit positions of the segment descriptor attributes inside `CpuSegment::flags`.
const DESC_TYPE_SHIFT: u32 = 8;
const DESC_S_SHIFT: u32 = 12;
const DESC_DPL_SHIFT: u32 = 13;
const DESC_P_SHIFT: u32 = 15;
const DESC_P_MASK: u32 = 1 << DESC_P_SHIFT;
const DESC_AVL_SHIFT: u32 = 20;
const DESC_AVL_MASK: u32 = 1 << DESC_AVL_SHIFT;
const DESC_L_SHIFT: u32 = 21;
const DESC_B_SHIFT: u32 = 22;
const DESC_S_MASK: u32 = 1 << DESC_S_SHIFT;
const DESC_G_SHIFT: u32 = 23;
const DESC_G_MASK: u32 = 1 << DESC_G_SHIFT;

impl CpuSegment {
    pub fn new(reg: SegmentRegister) -> Self {
        let p_mask = if (reg.present > 0) && (reg.unusable == 0) {
            DESC_P_MASK
        } else {
            0
        };
        let flags = ((reg.type_ as u32) << DESC_TYPE_SHIFT)
            | p_mask
            | ((reg.dpl as u32) << DESC_DPL_SHIFT)
            | ((reg.db as u32) << DESC_B_SHIFT)
            | ((reg.s as u32) * DESC_S_MASK)
            | ((reg.l as u32) << DESC_L_SHIFT)
            | ((reg.g as u32) * DESC_G_MASK)
            | ((reg.avl as u32) * DESC_AVL_MASK);

        CpuSegment {
            selector: reg.selector as u32,
            limit: reg.limit,
            flags,
            pad: 0,
            base: reg.base,
        }
    }

    pub fn new_from_table(reg: DescriptorTable) -> Self {
        CpuSegment {
            selector: 0,
            limit: reg.limit as u32,
            flags: 0,
            pad: 0,
            base: reg.base,
        }
    }
}
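
// A minimal sketch of how `CpuSegment::new` packs the descriptor attributes
// into the `flags` word, using a typical 64-bit code segment. The field values
// below are illustrative only, and the struct literal assumes
// `hypervisor::arch::x86::SegmentRegister` derives `Default`.
#[cfg(all(test, target_arch = "x86_64"))]
mod cpu_segment_tests {
    use super::*;

    #[test]
    fn code_segment_flags_are_packed() {
        let reg = SegmentRegister {
            selector: 0x8,
            base: 0,
            limit: 0xffff_ffff,
            type_: 0xb, // execute/read, accessed
            present: 1,
            dpl: 0,
            db: 0,
            s: 1, // code/data segment
            l: 1, // 64-bit mode
            g: 1, // 4 KiB granularity
            avl: 0,
            unusable: 0,
            ..Default::default()
        };

        let seg = CpuSegment::new(reg);

        // type 0xb | S | P | L | G, each shifted into its descriptor position.
        assert_eq!(seg.flags, 0x00a0_9b00);
        assert_ne!(seg.flags & DESC_P_MASK, 0);
        assert_eq!(seg.selector, 0x8);
    }
}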

#[repr(C)]
#[derive(Default, Copy, Clone)]
pub struct CpuState {
    pub version: u32,
    pub size: u32,
    /// rax, rbx, rcx, rdx, rsi, rdi, rsp, rbp
    pub regs1: [u64; 8],
    /// r8, r9, r10, r11, r12, r13, r14, r15
    pub regs2: [u64; 8],
    pub rip: u64,
    pub rflags: u64,
    pub cs: CpuSegment,
    pub ds: CpuSegment,
    pub es: CpuSegment,
    pub fs: CpuSegment,
    pub gs: CpuSegment,
    pub ss: CpuSegment,
    pub ldt: CpuSegment,
    pub tr: CpuSegment,
    pub gdt: CpuSegment,
    pub idt: CpuSegment,
    pub cr: [u64; 5],
    pub kernel_gs_base: u64,
}

// SAFETY: This is just a series of bytes
unsafe impl ByteValued for CpuState {}

pub enum NoteDescType {
    Elf = 0,
    Vmm = 1,
    ElfAndVmm = 2,
}

// "CORE" or "QEMU" (4 characters plus the trailing NUL)
pub const COREDUMP_NAME_SIZE: u32 = 5;
pub const NT_PRSTATUS: u32 = 1;

/// Core file.
const ET_CORE: u16 = 4;
/// 64-bit object.
const ELFCLASS64: u8 = 2;
/// Current ELF version.
const EV_CURRENT: u8 = 1;
/// AMD x86-64 architecture
const EM_X86_64: u16 = 62;

pub trait Elf64Writable {
    fn write_header(
        &mut self,
        dump_state: &DumpState,
    ) -> std::result::Result<(), GuestDebuggableError> {
        let e_ident = [
            elf::ELFMAG0 as u8, // magic
            elf::ELFMAG1,
            elf::ELFMAG2,
            elf::ELFMAG3,
            ELFCLASS64,             // class
            elf::ELFDATA2LSB as u8, // data (little-endian)
            EV_CURRENT,             // version
            0,                      // os_abi
            0,                      // abi_version
            0,                      // padding
            0,
            0,
            0,
            0,
            0,
            0,
        ];
        let elf64_ehdr_size = std::mem::size_of::<elf::Elf64_Ehdr>();
        let elf64_phdr_size = std::mem::size_of::<elf::Elf64_Phdr>();
        let mut elf64_ehdr = elf::Elf64_Ehdr {
            e_ident,
            e_type: ET_CORE,
            e_machine: EM_X86_64,
            e_version: EV_CURRENT as u32,
            e_entry: 0,
            e_phoff: elf64_ehdr_size as u64,
            e_shoff: 0,
            e_flags: 0,
            e_ehsize: 0,
            e_phentsize: elf64_phdr_size as u16,
            e_phnum: dump_state.elf_phdr_num,
            e_shentsize: 0,
            e_shnum: 0,
            e_shstrndx: 0,
        };
        elf64_ehdr.e_ehsize = std::mem::size_of_val(&elf64_ehdr) as u16;

        let mut coredump_file = dump_state.file.as_ref().unwrap();
        let bytes: &[u8] = elf64_ehdr.as_slice();
        coredump_file
            .write_all(bytes)
            .map_err(GuestDebuggableError::CoredumpFile)?;

        Ok(())
    }

    fn write_note(
        &mut self,
        dump_state: &DumpState,
    ) -> std::result::Result<(), GuestDebuggableError> {
        let begin = dump_state.mem_offset - dump_state.elf_note_size as u64;
        let elf64_phdr = elf::Elf64_Phdr {
            p_type: elf::PT_NOTE,
            p_flags: 0,
            p_offset: begin,
            p_vaddr: 0,
            p_paddr: 0,
            p_filesz: dump_state.elf_note_size as u64,
            p_memsz: dump_state.elf_note_size as u64,
            p_align: 0,
        };

        let mut coredump_file = dump_state.file.as_ref().unwrap();
        let bytes: &[u8] = elf64_phdr.as_slice();
        coredump_file
            .write_all(bytes)
            .map_err(GuestDebuggableError::CoredumpFile)?;

        Ok(())
    }

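    /// Describe one guest RAM region with a `PT_LOAD` program header: `offset`
    /// is where the region's bytes live inside the core file, `phys_addr` is
    /// the guest physical address, and `length` is used for both `p_filesz`
    /// and `p_memsz`. `write_loads` below passes 0 for `virt_addr`.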
    fn write_load(
        &mut self,
        offset: u64,
        phys_addr: u64,
        length: u64,
        virt_addr: u64,
        dump_state: &DumpState,
    ) -> std::result::Result<(), GuestDebuggableError> {
        let elf64_load = elf::Elf64_Phdr {
            p_type: elf::PT_LOAD,
            p_flags: 0,
            p_offset: offset,
            p_vaddr: virt_addr,
            p_paddr: phys_addr,
            p_filesz: length,
            p_memsz: length,
            p_align: 0,
        };

        let mut coredump_file = dump_state.file.as_ref().unwrap();
        let bytes: &[u8] = elf64_load.as_slice();
        coredump_file
            .write_all(bytes)
            .map_err(GuestDebuggableError::CoredumpFile)?;

        Ok(())
    }

    fn write_loads(
        &mut self,
        dump_state: &DumpState,
    ) -> std::result::Result<(), GuestDebuggableError> {
        let mem_info = dump_state.mem_info.as_ref().unwrap();

        for (gpa, load) in &mem_info.ram_maps {
            self.write_load(load.mem_offset_in_elf, *gpa, load.mem_size, 0, dump_state)?;
        }

        Ok(())
    }

    fn elf_note_size(&self, hdr_size: u32, name_size: u32, desc_size: u32) -> u32 {
        (div_round_up!(hdr_size, 4) + div_round_up!(name_size, 4) + div_round_up!(desc_size, 4)) * 4
    }

    fn get_note_size(&self, desc_type: NoteDescType, nr_cpus: u32) -> u32 {
        let note_head_size = std::mem::size_of::<elf::Elf64_Nhdr>() as u32;
        let elf_desc_size = std::mem::size_of::<X86_64ElfPrStatus>() as u32;
        let cpu_state_desc_size = std::mem::size_of::<CpuState>() as u32;

        let elf_note_size = self.elf_note_size(note_head_size, COREDUMP_NAME_SIZE, elf_desc_size);
        let vmm_note_size =
            self.elf_note_size(note_head_size, COREDUMP_NAME_SIZE, cpu_state_desc_size);

        match desc_type {
            NoteDescType::Elf => elf_note_size * nr_cpus,
            NoteDescType::Vmm => vmm_note_size * nr_cpus,
            NoteDescType::ElfAndVmm => (elf_note_size + vmm_note_size) * nr_cpus,
        }
    }
}

pub trait CpuElf64Writable {
    fn cpu_write_elf64_note(
        &mut self,
        _dump_state: &DumpState,
    ) -> std::result::Result<(), GuestDebuggableError> {
        Ok(())
    }

    fn cpu_write_vmm_note(
        &mut self,
        _dump_state: &DumpState,
    ) -> std::result::Result<(), GuestDebuggableError> {
        Ok(())
    }
}
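
// A minimal sketch exercising `div_round_up!` and the note-size helpers above.
// `NoteSizer` is a hypothetical implementor that only exists to reach the
// trait's default methods.
#[cfg(test)]
mod note_size_tests {
    use super::*;

    struct NoteSizer;
    impl Elf64Writable for NoteSizer {}

    #[test]
    fn div_round_up_rounds_to_the_next_multiple() {
        assert_eq!(div_round_up!(0, 4), 0);
        assert_eq!(div_round_up!(1, 4), 1);
        assert_eq!(div_round_up!(4, 4), 1);
        assert_eq!(div_round_up!(5, 4), 2);
    }

    #[test]
    fn note_sizes_are_word_aligned_and_additive() {
        let sizer = NoteSizer;
        let elf = sizer.get_note_size(NoteDescType::Elf, 2);
        let vmm = sizer.get_note_size(NoteDescType::Vmm, 2);
        let both = sizer.get_note_size(NoteDescType::ElfAndVmm, 2);

        // Each per-CPU note is padded to a 4-byte boundary ...
        assert_eq!(elf % 4, 0);
        assert_eq!(vmm % 4, 0);
        // ... and the combined descriptor type is simply the sum of the two.
        assert_eq!(both, elf + vmm);
    }
}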