xref: /cloud-hypervisor/fuzz/fuzz_targets/console.rs (revision 6f8bd27cf7629733582d930519e98d19e90afb16)
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0

#![no_main]

use libfuzzer_sys::fuzz_target;
use seccompiler::SeccompAction;
use std::fs::File;
use std::io::Write;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::sync::Arc;
use virtio_devices::{VirtioDevice, VirtioInterrupt, VirtioInterruptType};
use virtio_queue::{Queue, QueueT};
use vm_memory::{bitmap::AtomicBitmap, Bytes, GuestAddress, GuestMemoryAtomic};
use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK};

type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

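// Round `$n` up to the next multiple of `$align`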
macro_rules! align {
    ($n:expr, $align:expr) => {{
        (($n + $align - 1) / $align) * $align
    }};
}

// Number of fuzz-input bytes fed to the console as guest input
const CONSOLE_INPUT_SIZE: usize = 128;
// Fuzz-input bytes consumed per queue to derive its state
const QUEUE_DATA_SIZE: usize = 4;
// Size of the main guest memory region
const MEM_SIZE: usize = 32 * 1024 * 1024;
// Guest memory gap
const GUEST_MEM_GAP: u64 = 1 * 1024 * 1024;
// Guest physical address for the first virt queue
const BASE_VIRT_QUEUE_ADDR: u64 = MEM_SIZE as u64 + GUEST_MEM_GAP;
// Number of queues
const QUEUE_NUM: usize = 2;
// Max entries in the queue.
const QUEUE_SIZE: u16 = 256;
// Descriptor table alignment
const DESC_TABLE_ALIGN_SIZE: u64 = 16;
// Used ring alignment
const USED_RING_ALIGN_SIZE: u64 = 4;
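// The sizes below follow the virtio split-queue layout: each descriptor is
// 16 bytes, the available ring is 6 + 2 * QUEUE_SIZE bytes and the used ring
// is 6 + 8 * QUEUE_SIZE bytes (flags, idx, the ring entries, event index).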
// Descriptor table size
const DESC_TABLE_SIZE: u64 = 16_u64 * QUEUE_SIZE as u64;
// Available ring size
const AVAIL_RING_SIZE: u64 = 6_u64 + 2 * QUEUE_SIZE as u64;
// Padding size before used ring
const PADDING_SIZE: u64 = align!(AVAIL_RING_SIZE, USED_RING_ALIGN_SIZE) - AVAIL_RING_SIZE;
// Used ring size
const USED_RING_SIZE: u64 = 6_u64 + 8 * QUEUE_SIZE as u64;
// Virtio-queue size in bytes
const QUEUE_BYTES_SIZE: usize = align!(
    DESC_TABLE_SIZE + AVAIL_RING_SIZE + PADDING_SIZE + USED_RING_SIZE,
    DESC_TABLE_ALIGN_SIZE
) as usize;

fuzz_target!(|bytes| {
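    // The fuzz input must be large enough to fill the per-queue state, the
    // queue rings and the console input, and the remainder must fit into
    // guest memory.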
    if bytes.len() < (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM + CONSOLE_INPUT_SIZE
        || bytes.len()
            > (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM + CONSOLE_INPUT_SIZE + MEM_SIZE
    {
        return;
    }

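    // The console endpoint writes its output to an anonymous memfd and reads
    // its input from the read end of a pipe; the write end is used below to
    // inject the fuzzed console input.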
    let (pipe_rx, mut pipe_tx) = create_pipe().unwrap();
    let output = unsafe {
        File::from_raw_fd(
            memfd_create(&std::ffi::CString::new("fuzz_console_output").unwrap()).unwrap(),
        )
    };
    let endpoint = virtio_devices::Endpoint::FilePair(output, pipe_rx);

    let (mut console, _) = virtio_devices::Console::new(
        "fuzzer_console".to_owned(),
        endpoint,
        None,  // resize_pipe
        false, // iommu
        SeccompAction::Allow,
        EventFd::new(EFD_NONBLOCK).unwrap(),
        None,
    )
    .unwrap();

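    // Carve the fuzz input into console input, per-queue state, queue ring
    // bytes and guest memory contents.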
    let console_input_bytes = &bytes[..CONSOLE_INPUT_SIZE];
    let queue_data = &bytes[CONSOLE_INPUT_SIZE..CONSOLE_INPUT_SIZE + QUEUE_DATA_SIZE * QUEUE_NUM];
    let queue_bytes = &bytes[CONSOLE_INPUT_SIZE + QUEUE_DATA_SIZE * QUEUE_NUM
        ..CONSOLE_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM];
    let mem_bytes = &bytes[CONSOLE_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM..];

    // Set up the virt queues with the input bytes
    let mut queues = setup_virt_queues(
        &[
            &queue_data[..QUEUE_DATA_SIZE].try_into().unwrap(),
            &queue_data[QUEUE_DATA_SIZE..QUEUE_DATA_SIZE * 2]
                .try_into()
                .unwrap(),
        ],
        BASE_VIRT_QUEUE_ADDR,
    );

    // Set up the guest memory with the input bytes
    let mem = GuestMemoryMmap::from_ranges(&[
        (GuestAddress(0), MEM_SIZE),
        (GuestAddress(BASE_VIRT_QUEUE_ADDR), queue_bytes.len()),
    ])
    .unwrap();
    if mem
        .write_slice(queue_bytes, GuestAddress(BASE_VIRT_QUEUE_ADDR))
        .is_err()
    {
        return;
    }
    if mem.write_slice(mem_bytes, GuestAddress(0)).is_err() {
        return;
    }
    let guest_memory = GuestMemoryAtomic::new(mem);

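    // Duplicate each queue event fd so it can be kicked here while the
    // original is handed to the device on activation.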
    let input_queue = queues.remove(0);
    let input_evt = EventFd::new(0).unwrap();
    let input_queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(input_evt.as_raw_fd())) };
    let output_queue = queues.remove(0);
    let output_evt = EventFd::new(0).unwrap();
    let output_queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(output_evt.as_raw_fd())) };

    // Kick the queue events and the endpoint event before activating the console device
    input_queue_evt.write(1).unwrap();
    output_queue_evt.write(1).unwrap();
    pipe_tx.write_all(console_input_bytes).unwrap(); // Inject the fuzzed console input

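    // Activating the device starts the epoll worker thread that services the
    // queues and the endpoint.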
    console
        .activate(
            guest_memory,
            Arc::new(NoopVirtioInterrupt {}),
            vec![(0, input_queue, input_evt), (1, output_queue, output_evt)],
        )
        .unwrap();

    // Wait for the events to finish and the console device worker thread to return
    console.wait_for_epoll_threads();
});

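// Interrupt delivery stub that simply drops every interrupt request.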
pub struct NoopVirtioInterrupt {}

impl VirtioInterrupt for NoopVirtioInterrupt {
    fn trigger(&self, _int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
        Ok(())
    }
}

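// Build one virtio queue per `QUEUE_DATA_SIZE`-byte chunk of fuzz input, laid
// out contiguously in guest memory starting at `base_addr`; each chunk drives
// the queue's next_avail/next_used indexes, event_idx flag and size.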
fn setup_virt_queues(bytes: &[&[u8; QUEUE_DATA_SIZE]], base_addr: u64) -> Vec<Queue> {
    let mut queues = Vec::new();
    for (i, b) in bytes.iter().enumerate() {
        let mut q = Queue::new(QUEUE_SIZE).unwrap();

        // Place the descriptor table, available ring and used ring back to
        // back within this queue's QUEUE_BYTES_SIZE slot
        let desc_table_addr = base_addr + (QUEUE_BYTES_SIZE * i) as u64;
        let avail_ring_addr = desc_table_addr + DESC_TABLE_SIZE;
        let used_ring_addr = avail_ring_addr + PADDING_SIZE + AVAIL_RING_SIZE;
        q.try_set_desc_table_address(GuestAddress(desc_table_addr))
            .unwrap();
        q.try_set_avail_ring_address(GuestAddress(avail_ring_addr))
            .unwrap();
        q.try_set_used_ring_address(GuestAddress(used_ring_addr))
            .unwrap();

        q.set_next_avail(b[0] as u16); // A 'u8' is enough given 'QUEUE_SIZE' is small
        q.set_next_used(b[1] as u16);
        q.set_event_idx(b[2] % 2 != 0);
        q.set_size(b[3] as u16 % QUEUE_SIZE);

        q.set_ready(true);
        queues.push(q);
    }

    queues
}

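// Minimal wrapper around the memfd_create(2) syscall, returning the new fd.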
fn memfd_create(name: &std::ffi::CStr) -> Result<RawFd, std::io::Error> {
    let res = unsafe { libc::syscall(libc::SYS_memfd_create, name.as_ptr(), 0) };

    if res < 0 {
        Err(std::io::Error::last_os_error())
    } else {
        Ok(res as RawFd)
    }
}

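// Create an O_CLOEXEC pipe and return its (read end, write end) as `File`s.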
fn create_pipe() -> Result<(File, File), std::io::Error> {
    let mut pipe = [-1; 2];
    if unsafe { libc::pipe2(pipe.as_mut_ptr(), libc::O_CLOEXEC) } == -1 {
        return Err(std::io::Error::last_os_error());
    }
    let rx = unsafe { File::from_raw_fd(pipe[0]) };
    let tx = unsafe { File::from_raw_fd(pipe[1]) };

    Ok((rx, tx))
}