// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0

#![no_main]

use libfuzzer_sys::fuzz_target;
use seccompiler::SeccompAction;
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::sync::Arc;
use virtio_devices::{VirtioDevice, VirtioInterrupt, VirtioInterruptType};
use virtio_queue::{Queue, QueueT};
use vm_memory::{bitmap::AtomicBitmap, Bytes, GuestAddress, GuestMemoryAtomic};
use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK};

type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

const QUEUE_DATA_SIZE: usize = 4;
const MEM_SIZE: usize = 512 * 1024;
const BALLOON_SIZE: u64 = 512 * 1024;
// Number of queues
const QUEUE_NUM: usize = 3;
// Max entries in the queue.
const QUEUE_SIZE: u16 = 64;
// Descriptor table alignment
const DESC_TABLE_ALIGN_SIZE: u64 = 16;
// Available ring alignment
const AVAIL_RING_ALIGN_SIZE: u64 = 2;
// Used ring alignment
const USED_RING_ALIGN_SIZE: u64 = 4;
// Descriptor table size
const DESC_TABLE_SIZE: u64 = 16_u64 * QUEUE_SIZE as u64;
// Available ring size
const AVAIL_RING_SIZE: u64 = 6_u64 + 2 * QUEUE_SIZE as u64;
// Used ring size
const USED_RING_SIZE: u64 = 6_u64 + 8 * QUEUE_SIZE as u64;

fuzz_target!(|bytes| {
    if bytes.len() < QUEUE_DATA_SIZE * QUEUE_NUM
        || bytes.len() > (QUEUE_DATA_SIZE * QUEUE_NUM + MEM_SIZE)
    {
        return;
    }

    let mut balloon = virtio_devices::Balloon::new(
        "fuzzer_balloon".to_owned(),
        BALLOON_SIZE,
        true,
        true,
        SeccompAction::Allow,
        EventFd::new(EFD_NONBLOCK).unwrap(),
        None,
    )
    .unwrap();

    let queue_data = &bytes[..QUEUE_DATA_SIZE * QUEUE_NUM];
    let mem_bytes = &bytes[QUEUE_DATA_SIZE * QUEUE_NUM..];

    // Setup the guest memory with the input bytes
    let mem = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), MEM_SIZE)]).unwrap();
    if mem.write_slice(mem_bytes, GuestAddress(0)).is_err() {
        return;
    }
    let guest_memory = GuestMemoryAtomic::new(mem);

    // Setup the virt queues with the input bytes
    let mut queues = setup_virt_queues(
        &[
            &queue_data[..QUEUE_DATA_SIZE].try_into().unwrap(),
            &queue_data[QUEUE_DATA_SIZE..QUEUE_DATA_SIZE * 2]
                .try_into()
                .unwrap(),
            &queue_data[QUEUE_DATA_SIZE * 2..QUEUE_DATA_SIZE * 3]
                .try_into()
                .unwrap(),
        ],
        0,
    );

    let inflate_q = queues.remove(0);
    let inflate_evt = EventFd::new(0).unwrap();
    let inflate_queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(inflate_evt.as_raw_fd())) };
    let deflate_q = queues.remove(0);
    let deflate_evt = EventFd::new(0).unwrap();
    let deflate_queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(deflate_evt.as_raw_fd())) };
    let reporting_q = queues.remove(0);
    let reporting_evt = EventFd::new(0).unwrap();
    let reporting_queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(reporting_evt.as_raw_fd())) };

    // Kick the 'queue' events before activating the balloon device
    inflate_queue_evt.write(1).unwrap();
    deflate_queue_evt.write(1).unwrap();
    reporting_queue_evt.write(1).unwrap();

    balloon
        .activate(
            guest_memory,
            Arc::new(NoopVirtioInterrupt {}),
            vec![
                (0, inflate_q, inflate_evt),
                (1, deflate_q, deflate_evt),
                (2, reporting_q, reporting_evt),
            ],
        )
        .ok();

    // Wait for the events to finish and the balloon device worker thread to return
    balloon.wait_for_epoll_threads();
});

pub struct NoopVirtioInterrupt {}

impl VirtioInterrupt for NoopVirtioInterrupt {
    fn trigger(&self, _int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
        Ok(())
    }
}

// Round `$n` up to the next multiple of `$align`
macro_rules! align {
    ($n:expr, $align:expr) => {{
        (($n + $align - 1) / $align) * $align
    }};
}

fn setup_virt_queues(bytes: &[&[u8; QUEUE_DATA_SIZE]], base_addr: u64) -> Vec<Queue> {
    let mut queues = Vec::new();
    let mut base_addr = base_addr;
    for b in bytes {
        let mut q = Queue::new(QUEUE_SIZE).unwrap();

        // Lay out the descriptor table, available ring and used ring back to
        // back in guest memory, respecting their alignment requirements.
        let desc_table_addr = align!(base_addr, DESC_TABLE_ALIGN_SIZE);
        let avail_ring_addr = align!(desc_table_addr + DESC_TABLE_SIZE, AVAIL_RING_ALIGN_SIZE);
        let used_ring_addr = align!(avail_ring_addr + AVAIL_RING_SIZE, USED_RING_ALIGN_SIZE);
        q.try_set_desc_table_address(GuestAddress(desc_table_addr))
            .unwrap();
        q.try_set_avail_ring_address(GuestAddress(avail_ring_addr))
            .unwrap();
        q.try_set_used_ring_address(GuestAddress(used_ring_addr))
            .unwrap();

        q.set_next_avail(b[0] as u16); // 'u8' is enough given that 'QUEUE_SIZE' is small
        q.set_next_used(b[1] as u16);
        q.set_event_idx(b[2] % 2 != 0);
        q.set_size(b[3] as u16 % QUEUE_SIZE);

        q.set_ready(true);
        queues.push(q);

        base_addr = used_ring_addr + USED_RING_SIZE;
    }

    queues
}