// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0

#![no_main]

use libfuzzer_sys::fuzz_target;
use seccompiler::SeccompAction;
use std::fs::File;
use std::io::{Read, Write};
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::sync::Arc;
use virtio_devices::{VirtioDevice, VirtioInterrupt, VirtioInterruptType};
use virtio_queue::{Queue, QueueT};
use vm_memory::{bitmap::AtomicBitmap, Bytes, GuestAddress, GuestMemoryAtomic};
use vmm::EpollContext;
use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK};

type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

macro_rules! align {
    ($n:expr, $align:expr) => {{
        (($n + $align - 1) / $align) * $align
    }};
}

const TAP_INPUT_SIZE: usize = 128;
const QUEUE_DATA_SIZE: usize = 4;
const MEM_SIZE: usize = 32 * 1024 * 1024;
// Guest memory gap
const GUEST_MEM_GAP: u64 = 1 * 1024 * 1024;
// Guest physical address for the first virt queue
const BASE_VIRT_QUEUE_ADDR: u64 = MEM_SIZE as u64 + GUEST_MEM_GAP;
// Number of queues
const QUEUE_NUM: usize = 2;
// Max entries in the queue.
const QUEUE_SIZE: u16 = 256;
// Descriptor table alignment
const DESC_TABLE_ALIGN_SIZE: u64 = 16;
// Used ring alignment
const USED_RING_ALIGN_SIZE: u64 = 4;
// Descriptor table size
const DESC_TABLE_SIZE: u64 = 16_u64 * QUEUE_SIZE as u64;
// Available ring size
const AVAIL_RING_SIZE: u64 = 6_u64 + 2 * QUEUE_SIZE as u64;
// Padding size before used ring
const PADDING_SIZE: u64 = align!(AVAIL_RING_SIZE, USED_RING_ALIGN_SIZE) - AVAIL_RING_SIZE;
// Used ring size
const USED_RING_SIZE: u64 = 6_u64 + 8 * QUEUE_SIZE as u64;
// Virtio-queue size in bytes
const QUEUE_BYTES_SIZE: usize = align!(
    DESC_TABLE_SIZE + AVAIL_RING_SIZE + PADDING_SIZE + USED_RING_SIZE,
    DESC_TABLE_ALIGN_SIZE
) as usize;
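// Worked example of the layout arithmetic above (for reference only, derived from the
// constants; the code never uses these literal values):
//   DESC_TABLE_SIZE  = 16 * 256                         = 4096 bytes
//   AVAIL_RING_SIZE  = 6 + 2 * 256                      = 518 bytes
//   PADDING_SIZE     = align(518, 4) - 518              = 2 bytes
//   USED_RING_SIZE   = 6 + 8 * 256                      = 2054 bytes
//   QUEUE_BYTES_SIZE = align(4096 + 518 + 2 + 2054, 16) = 6672 bytes
// Minimum fuzz input length = TAP_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM
//                           = 128 + (4 + 6672) * 2      = 13480 bytes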

fuzz_target!(|bytes| {
    if bytes.len() < TAP_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM
        || bytes.len()
            > TAP_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM + MEM_SIZE
    {
        return;
    }

    let (dummy_tap_frontend, dummy_tap_backend) = create_socketpair().unwrap();
    let if_name = "fuzzer_tap_name".as_bytes().to_vec();
    let tap = net_util::Tap::new_for_fuzzing(dummy_tap_frontend, if_name);

    let mut net = virtio_devices::Net::new_with_tap(
        "fuzzer_net".to_owned(),
        vec![tap],
        None,  // guest_mac
        false, // iommu
        QUEUE_NUM,
        QUEUE_SIZE,
        SeccompAction::Allow,
        None,
        EventFd::new(EFD_NONBLOCK).unwrap(),
        None,
    )
    .unwrap();

    // Split the fuzz input: tap frames, per-queue state, raw virtqueue memory, then guest RAM
    let tap_input_bytes = &bytes[..TAP_INPUT_SIZE];
    let queue_data = &bytes[TAP_INPUT_SIZE..TAP_INPUT_SIZE + QUEUE_DATA_SIZE * QUEUE_NUM];
    let queue_bytes = &bytes[TAP_INPUT_SIZE + QUEUE_DATA_SIZE * QUEUE_NUM
        ..TAP_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM];
    let mem_bytes = &bytes[TAP_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM..];

    // Setup the virt queues with the input bytes
    let mut queues = setup_virt_queues(
        &[
            &queue_data[..QUEUE_DATA_SIZE].try_into().unwrap(),
            &queue_data[QUEUE_DATA_SIZE..QUEUE_DATA_SIZE * 2]
                .try_into()
                .unwrap(),
        ],
        BASE_VIRT_QUEUE_ADDR,
    );

    // Setup the guest memory with the input bytes
    let mem = GuestMemoryMmap::from_ranges(&[
        (GuestAddress(0), MEM_SIZE),
        (GuestAddress(BASE_VIRT_QUEUE_ADDR), queue_bytes.len()),
    ])
    .unwrap();
    if mem
        .write_slice(queue_bytes, GuestAddress(BASE_VIRT_QUEUE_ADDR))
        .is_err()
    {
        return;
    }
    if mem.write_slice(mem_bytes, GuestAddress(0)).is_err() {
        return;
    }
    let guest_memory = GuestMemoryAtomic::new(mem);

    let input_queue = queues.remove(0);
    let input_evt = EventFd::new(0).unwrap();
    let input_queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(input_evt.as_raw_fd())) };
    let output_queue = queues.remove(0);
    let output_evt = EventFd::new(0).unwrap();
    let output_queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(output_evt.as_raw_fd())) };

    // Start the dummy tap backend thread to handle the rx and tx traffic from the virtio-net device
    let exit_evt = EventFd::new(libc::EFD_NONBLOCK).unwrap();
    let tap_backend_thread = {
        let dummy_tap_backend = dummy_tap_backend.try_clone().unwrap();
        let tap_input_bytes: [u8; TAP_INPUT_SIZE] = tap_input_bytes[..].try_into().unwrap();
        let exit_evt = exit_evt.try_clone().unwrap();
        std::thread::Builder::new()
            .name("dummy_tap_backend".to_string())
            .spawn(move || {
                tap_backend_stub(dummy_tap_backend, &tap_input_bytes, exit_evt);
            })
            .unwrap()
    };

    // Kick the queue events before activating the net device
    input_queue_evt.write(1).unwrap();
    output_queue_evt.write(1).unwrap();

    net.activate(
        guest_memory,
        Arc::new(NoopVirtioInterrupt {}),
        vec![(0, input_queue, input_evt), (1, output_queue, output_evt)],
    )
    .unwrap();

    // Wait for the events to be processed and the net device worker threads to return
    net.wait_for_epoll_threads();
    // Terminate the thread for the dummy tap backend
    exit_evt.write(1).ok();
    tap_backend_thread.join().unwrap();
});
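
// A no-op interrupt callback: the fuzzer only exercises the device's queue processing,
// so interrupts destined for a guest can be silently discarded.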
pub struct NoopVirtioInterrupt {}

impl VirtioInterrupt for NoopVirtioInterrupt {
    fn trigger(&self, _int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
        Ok(())
    }
}

// Build the virtqueues backed by guest memory starting at 'base_addr', seeding their
// state (indexes, event_idx, size) from the fuzz input bytes
fn setup_virt_queues(bytes: &[&[u8; QUEUE_DATA_SIZE]], base_addr: u64) -> Vec<Queue> {
    let mut queues = Vec::new();
    for (i, b) in bytes.iter().enumerate() {
        let mut q = Queue::new(QUEUE_SIZE).unwrap();

        let desc_table_addr = base_addr + (QUEUE_BYTES_SIZE * i) as u64;
        let avail_ring_addr = desc_table_addr + DESC_TABLE_SIZE;
        let used_ring_addr = avail_ring_addr + AVAIL_RING_SIZE + PADDING_SIZE;
        q.try_set_desc_table_address(GuestAddress(desc_table_addr))
            .unwrap();
        q.try_set_avail_ring_address(GuestAddress(avail_ring_addr))
            .unwrap();
        q.try_set_used_ring_address(GuestAddress(used_ring_addr))
            .unwrap();

        q.set_next_avail(b[0] as u16); // 'u8' is enough given that 'QUEUE_SIZE' is small
        q.set_next_used(b[1] as u16);
        q.set_event_idx(b[2] % 2 != 0);
        q.set_size(b[3] as u16 % QUEUE_SIZE);

        q.set_ready(true);
        queues.push(q);
    }

    queues
}

// Create a non-blocking Unix socket pair that stands in for the tap device's file descriptor
fn create_socketpair() -> Result<(File, File), std::io::Error> {
    let mut fds = [-1, -1];
    unsafe {
        let ret = libc::socketpair(
            libc::AF_UNIX,
            libc::SOCK_STREAM | libc::SOCK_NONBLOCK,
            0,
            fds.as_mut_ptr(),
        );
        if ret == -1 {
            return Err(std::io::Error::last_os_error());
        }
    }

    let socket1 = unsafe { File::from_raw_fd(fds[0]) };
    let socket2 = unsafe { File::from_raw_fd(fds[1]) };
    Ok((socket1, socket2))
}

enum EpollEvent {
    Exit = 0,
    Rx = 1,
    Tx = 2,
    Unknown,
}

impl From<u64> for EpollEvent {
    fn from(v: u64) -> Self {
        use EpollEvent::*;
        match v {
            0 => Exit,
            1 => Rx,
            2 => Tx,
            _ => Unknown,
        }
    }
}

// Handle the rx and tx requests from the virtio-net device
fn tap_backend_stub(
    mut dummy_tap: File,
    tap_input_bytes: &[u8; TAP_INPUT_SIZE],
    exit_evt: EventFd,
) {
    let mut epoll = EpollContext::new().unwrap();
    epoll
        .add_event_custom(&exit_evt, EpollEvent::Exit as u64, epoll::Events::EPOLLIN)
        .unwrap();
    let dummy_tap_write = dummy_tap.try_clone().unwrap();
    epoll
        .add_event_custom(
            &dummy_tap_write,
            EpollEvent::Rx as u64,
            epoll::Events::EPOLLOUT,
        )
        .unwrap();
    epoll
        .add_event_custom(&dummy_tap, EpollEvent::Tx as u64, epoll::Events::EPOLLIN)
        .unwrap();

    let epoll_fd = epoll.as_raw_fd();
    let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); 3];
    loop {
        let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) {
            Ok(num_events) => num_events,
            Err(e) => match e.raw_os_error() {
                Some(libc::EAGAIN) | Some(libc::EINTR) => continue,
                _ => panic!("Unexpected epoll::wait error!"),
            },
        };

        for event in events.iter().take(num_events) {
            let dispatch_event: EpollEvent = event.data.into();
            match dispatch_event {
                EpollEvent::Exit => {
                    return;
                }
                EpollEvent::Rx => {
                    dummy_tap.write_all(tap_input_bytes).unwrap();
                    break;
                }
                EpollEvent::Tx => {
                    let mut buffer = Vec::new();
                    dummy_tap.read_to_end(&mut buffer).ok();
                    break;
                }
                _ => {
                    panic!("Unexpected epoll event");
                }
            }
        }
    }
}
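
// Usage note: this is a libfuzzer-sys fuzz target. Assuming it is registered as a
// cargo-fuzz target (the exact target name is defined in the repository's fuzz/Cargo.toml;
// `net` is used here only as an example), it can be run with `cargo fuzz run net`.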