// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0

#![no_main]

use std::fs::File;
use std::io::{Read, Write};
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::sync::Arc;

use libfuzzer_sys::{fuzz_target, Corpus};
use seccompiler::SeccompAction;
use virtio_devices::{VirtioDevice, VirtioInterrupt, VirtioInterruptType};
use virtio_queue::{Queue, QueueT};
use vm_memory::bitmap::AtomicBitmap;
use vm_memory::{Bytes, GuestAddress, GuestMemoryAtomic};
use vmm::EpollContext;
use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK};

type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

macro_rules! align {
    ($n:expr, $align:expr) => {{
        $n.div_ceil($align) * $align
    }};
}

const TAP_INPUT_SIZE: usize = 128;
const QUEUE_DATA_SIZE: usize = 4;
const MEM_SIZE: usize = 32 * 1024 * 1024;
// Guest memory gap
const GUEST_MEM_GAP: u64 = 1 * 1024 * 1024;
// Guest physical address for the first virt queue
const BASE_VIRT_QUEUE_ADDR: u64 = MEM_SIZE as u64 + GUEST_MEM_GAP;
// Number of queues
const QUEUE_NUM: usize = 2;
// Max entries in the queue.
const QUEUE_SIZE: u16 = 256;
// Descriptor table alignment
const DESC_TABLE_ALIGN_SIZE: u64 = 16;
// Used ring alignment
const USED_RING_ALIGN_SIZE: u64 = 4;
// Descriptor table size
const DESC_TABLE_SIZE: u64 = 16_u64 * QUEUE_SIZE as u64;
// Available ring size
const AVAIL_RING_SIZE: u64 = 6_u64 + 2 * QUEUE_SIZE as u64;
// Padding size before used ring
const PADDING_SIZE: u64 = align!(AVAIL_RING_SIZE, USED_RING_ALIGN_SIZE) - AVAIL_RING_SIZE;
// Used ring size
const USED_RING_SIZE: u64 = 6_u64 + 8 * QUEUE_SIZE as u64;
// Virtio-queue size in bytes
const QUEUE_BYTES_SIZE: usize = align!(
    DESC_TABLE_SIZE + AVAIL_RING_SIZE + PADDING_SIZE + USED_RING_SIZE,
    DESC_TABLE_ALIGN_SIZE
) as usize;

fuzz_target!(|bytes: &[u8]| -> Corpus {
    if bytes.len() < TAP_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM
        || bytes.len()
            > TAP_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM + MEM_SIZE
    {
        return Corpus::Reject;
    }

    let (dummy_tap_frontend, dummy_tap_backend) = create_socketpair().unwrap();
    let if_name = "fuzzer_tap_name".as_bytes().to_vec();
    let tap = net_util::Tap::new_for_fuzzing(dummy_tap_frontend, if_name);

    let mut net = virtio_devices::Net::new_with_tap(
        "fuzzer_net".to_owned(),
        vec![tap],
        None,  // guest_mac
        false, // iommu
        QUEUE_NUM,
        QUEUE_SIZE,
        SeccompAction::Allow,
        None,
        EventFd::new(EFD_NONBLOCK).unwrap(),
        None,
        true,
        true,
        true,
    )
    .unwrap();

    let tap_input_bytes = &bytes[..TAP_INPUT_SIZE];
    let queue_data = &bytes[TAP_INPUT_SIZE..TAP_INPUT_SIZE + QUEUE_DATA_SIZE * QUEUE_NUM];
    let queue_bytes = &bytes[TAP_INPUT_SIZE + QUEUE_DATA_SIZE * QUEUE_NUM
        ..TAP_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM];
    let mem_bytes = &bytes[TAP_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM..];

    // Setup the virt queues with the input bytes
    let mut queues = setup_virt_queues(
        &[
            &queue_data[..QUEUE_DATA_SIZE].try_into().unwrap(),
            &queue_data[QUEUE_DATA_SIZE..QUEUE_DATA_SIZE * 2]
                .try_into()
                .unwrap(),
        ],
        BASE_VIRT_QUEUE_ADDR,
    );
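
    // Guest address space used by this fuzzer (a summary of the constants above):
    // - [0, MEM_SIZE): plain guest RAM, filled below with the trailing `mem_bytes`
    //   portion of the fuzz input.
    // - [BASE_VIRT_QUEUE_ADDR, BASE_VIRT_QUEUE_ADDR + QUEUE_NUM * QUEUE_BYTES_SIZE):
    //   the raw split-virtqueue structures (descriptor table, avail ring, padding,
    //   used ring) for the rx and tx queues, copied verbatim from `queue_bytes`.
    //   Each queue occupies one QUEUE_BYTES_SIZE slot, matching the addresses
    //   programmed in `setup_virt_queues()`.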
    // Setup the guest memory with the input bytes
    let mem = GuestMemoryMmap::from_ranges(&[
        (GuestAddress(0), MEM_SIZE),
        (GuestAddress(BASE_VIRT_QUEUE_ADDR), queue_bytes.len()),
    ])
    .unwrap();
    if mem
        .write_slice(queue_bytes, GuestAddress(BASE_VIRT_QUEUE_ADDR))
        .is_err()
    {
        return Corpus::Reject;
    }
    if mem.write_slice(mem_bytes, GuestAddress(0 as u64)).is_err() {
        return Corpus::Reject;
    }
    let guest_memory = GuestMemoryAtomic::new(mem);

    let input_queue = queues.remove(0);
    let input_evt = EventFd::new(0).unwrap();
    let input_queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(input_evt.as_raw_fd())) };
    let output_queue = queues.remove(0);
    let output_evt = EventFd::new(0).unwrap();
    let output_queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(output_evt.as_raw_fd())) };

    // Start the dummy tap backend thread to handle the rx and tx requests from the virtio-net device
    let exit_evt = EventFd::new(libc::EFD_NONBLOCK).unwrap();
    let tap_backend_thread = {
        let dummy_tap_backend = dummy_tap_backend.try_clone().unwrap();
        let tap_input_bytes: [u8; TAP_INPUT_SIZE] = tap_input_bytes[..].try_into().unwrap();
        let exit_evt = exit_evt.try_clone().unwrap();
        std::thread::Builder::new()
            .name("dummy_tap_backend".to_string())
            .spawn(move || {
                tap_backend_stub(dummy_tap_backend, &tap_input_bytes, exit_evt);
            })
            .unwrap()
    };

    // Kick the 'queue' events before activating the net device
    input_queue_evt.write(1).unwrap();
    output_queue_evt.write(1).unwrap();

    net.activate(
        guest_memory,
        Arc::new(NoopVirtioInterrupt {}),
        vec![(0, input_queue, input_evt), (1, output_queue, output_evt)],
    )
    .unwrap();

    // Wait for the events to finish and the net device worker thread to return
    net.wait_for_epoll_threads();
    // Terminate the thread for the dummy tap backend
    exit_evt.write(1).ok();
    tap_backend_thread.join().unwrap();

    Corpus::Keep
});

pub struct NoopVirtioInterrupt {}

impl VirtioInterrupt for NoopVirtioInterrupt {
    fn trigger(&self, _int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
        Ok(())
    }
}

fn setup_virt_queues(bytes: &[&[u8; QUEUE_DATA_SIZE]], base_addr: u64) -> Vec<Queue> {
    let mut queues = Vec::new();
    for (i, b) in bytes.iter().enumerate() {
        let mut q = Queue::new(QUEUE_SIZE).unwrap();

        let desc_table_addr = base_addr + (QUEUE_BYTES_SIZE * i) as u64;
        let avail_ring_addr = desc_table_addr + DESC_TABLE_SIZE;
        let used_ring_addr = avail_ring_addr + PADDING_SIZE + AVAIL_RING_SIZE;
        q.try_set_desc_table_address(GuestAddress(desc_table_addr))
            .unwrap();
        q.try_set_avail_ring_address(GuestAddress(avail_ring_addr))
            .unwrap();
        q.try_set_used_ring_address(GuestAddress(used_ring_addr))
            .unwrap();

        q.set_next_avail(b[0] as u16); // 'u8' is enough given that 'QUEUE_SIZE' is small
        q.set_next_used(b[1] as u16);
        q.set_event_idx(b[2] % 2 != 0);
        q.set_size(b[3] as u16 % QUEUE_SIZE);

        q.set_ready(true);
        queues.push(q);
    }

    queues
}

fn create_socketpair() -> Result<(File, File), std::io::Error> {
    let mut fds = [-1, -1];
    unsafe {
        let ret = libc::socketpair(
            libc::AF_UNIX,
            libc::SOCK_STREAM | libc::SOCK_NONBLOCK,
            0,
            fds.as_mut_ptr(),
        );
        if ret == -1 {
            return Err(std::io::Error::last_os_error());
        }
    }

    let socket1 = unsafe { File::from_raw_fd(fds[0]) };
    let socket2 = unsafe { File::from_raw_fd(fds[1]) };
    Ok((socket1, socket2))
}
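
// The dummy tap backend below stands in for a real tap device: the virtio-net
// device owns one end of the socketpair (wrapped via `Tap::new_for_fuzzing`)
// while `tap_backend_stub` drives the other end. When the backend end becomes
// writable (EPOLLOUT, mapped to `Rx`) it injects the fixed TAP_INPUT_SIZE-byte
// frame taken from the fuzz input, exercising the device's receive path; when
// it becomes readable (EPOLLIN, mapped to `Tx`) it drains whatever the device
// transmitted. The exit eventfd lets each fuzz iteration stop this loop once
// the device's epoll threads have returned.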
enum EpollEvent {
    Exit = 0,
    Rx = 1,
    Tx = 2,
    Unknown,
}

impl From<u64> for EpollEvent {
    fn from(v: u64) -> Self {
        use EpollEvent::*;
        match v {
            0 => Exit,
            1 => Rx,
            2 => Tx,
            _ => Unknown,
        }
    }
}

// Handle the rx and tx requests from the virtio-net device
fn tap_backend_stub(
    mut dummy_tap: File,
    tap_input_bytes: &[u8; TAP_INPUT_SIZE],
    exit_evt: EventFd,
) {
    let mut epoll = EpollContext::new().unwrap();
    epoll
        .add_event_custom(&exit_evt, EpollEvent::Exit as u64, epoll::Events::EPOLLIN)
        .unwrap();
    let dummy_tap_write = dummy_tap.try_clone().unwrap();
    epoll
        .add_event_custom(
            &dummy_tap_write,
            EpollEvent::Rx as u64,
            epoll::Events::EPOLLOUT,
        )
        .unwrap();
    epoll
        .add_event_custom(&dummy_tap, EpollEvent::Tx as u64, epoll::Events::EPOLLIN)
        .unwrap();

    let epoll_fd = epoll.as_raw_fd();
    let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); 3];
    loop {
        let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) {
            Ok(num_events) => num_events,
            Err(e) => match e.raw_os_error() {
                Some(libc::EAGAIN) | Some(libc::EINTR) => continue,
                _ => panic!("Unexpected epoll::wait error!"),
            },
        };

        for event in events.iter().take(num_events) {
            let dispatch_event: EpollEvent = event.data.into();
            match dispatch_event {
                EpollEvent::Exit => {
                    return;
                }
                EpollEvent::Rx => {
                    dummy_tap.write_all(tap_input_bytes).unwrap();
                    break;
                }
                EpollEvent::Tx => {
                    let mut buffer = Vec::new();
                    dummy_tap.read_to_end(&mut buffer).ok();
                    break;
                }
                _ => {
                    panic!("Unexpected Epoll event");
                }
            }
        }
    }
}
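
// A minimal consistency-check sketch (not part of the upstream fuzzer): the
// assertions below restate the split-virtqueue layout arithmetic encoded in
// the constants at the top of this file (16-byte descriptors, 2-byte avail
// ring entries, 8-byte used ring elements). Fuzz binaries are typically built
// with the test harness disabled, so treat this as documentation of the
// layout rather than a test that necessarily runs.
#[cfg(test)]
mod layout_checks {
    use super::*;

    #[test]
    fn virt_queue_layout_is_consistent() {
        // The padding computed via `align!` places the used ring on a
        // USED_RING_ALIGN_SIZE boundary right after the avail ring.
        assert_eq!((AVAIL_RING_SIZE + PADDING_SIZE) % USED_RING_ALIGN_SIZE, 0);
        // The per-queue footprint is rounded up to the descriptor-table
        // alignment, so queue `i + 1` starts on a properly aligned address.
        assert_eq!(QUEUE_BYTES_SIZE as u64 % DESC_TABLE_ALIGN_SIZE, 0);
        // The footprint covers the descriptor table, both rings and the padding.
        assert!(
            QUEUE_BYTES_SIZE as u64
                >= DESC_TABLE_SIZE + AVAIL_RING_SIZE + PADDING_SIZE + USED_RING_SIZE
        );
    }
}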