// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0

#![no_main]

use std::fs::File;
use std::io::Write;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::sync::Arc;

use libfuzzer_sys::{fuzz_target, Corpus};
use seccompiler::SeccompAction;
use virtio_devices::{VirtioDevice, VirtioInterrupt, VirtioInterruptType};
use virtio_queue::{Queue, QueueT};
use vm_memory::bitmap::AtomicBitmap;
use vm_memory::{Bytes, GuestAddress, GuestMemoryAtomic};
use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK};

type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

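// Round `$n` up to the next multiple of `$align`.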
macro_rules! align {
    ($n:expr, $align:expr) => {{
        $n.div_ceil($align) * $align
    }};
}

const CONSOLE_INPUT_SIZE: usize = 128;
const QUEUE_DATA_SIZE: usize = 4;
const MEM_SIZE: usize = 32 * 1024 * 1024;
// Guest memory gap
const GUEST_MEM_GAP: u64 = 1 * 1024 * 1024;
// Guest physical address for the first virt queue
const BASE_VIRT_QUEUE_ADDR: u64 = MEM_SIZE as u64 + GUEST_MEM_GAP;
// Number of queues
const QUEUE_NUM: usize = 2;
// Max entries in the queue.
const QUEUE_SIZE: u16 = 256;
// Descriptor table alignment
const DESC_TABLE_ALIGN_SIZE: u64 = 16;
// Used ring alignment
const USED_RING_ALIGN_SIZE: u64 = 4;
// Descriptor table size
const DESC_TABLE_SIZE: u64 = 16_u64 * QUEUE_SIZE as u64;
// Available ring size
const AVAIL_RING_SIZE: u64 = 6_u64 + 2 * QUEUE_SIZE as u64;
// Padding size before used ring
const PADDING_SIZE: u64 = align!(AVAIL_RING_SIZE, USED_RING_ALIGN_SIZE) - AVAIL_RING_SIZE;
// Used ring size
const USED_RING_SIZE: u64 = 6_u64 + 8 * QUEUE_SIZE as u64;
// Virtio-queue size in bytes
const QUEUE_BYTES_SIZE: usize = align!(
    DESC_TABLE_SIZE + AVAIL_RING_SIZE + PADDING_SIZE + USED_RING_SIZE,
    DESC_TABLE_ALIGN_SIZE
) as usize;

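// Fuzz input layout: CONSOLE_INPUT_SIZE bytes fed to the console as input,
// QUEUE_DATA_SIZE bytes of per-queue state, QUEUE_BYTES_SIZE bytes of ring
// contents per queue, and the remainder written into guest memory.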
fuzz_target!(|bytes: &[u8]| -> Corpus {
    if bytes.len() < (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM + CONSOLE_INPUT_SIZE
        || bytes.len()
            > (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM + CONSOLE_INPUT_SIZE + MEM_SIZE
    {
        return Corpus::Reject;
    }

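    // The pipe feeds fuzzed bytes into the console as input; the memfd-backed
    // file absorbs whatever the device writes as output.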
    let (pipe_rx, mut pipe_tx) = create_pipe().unwrap();
    let output = unsafe {
        File::from_raw_fd(
            memfd_create(&std::ffi::CString::new("fuzz_console_output").unwrap()).unwrap(),
        )
    };
    let endpoint = virtio_devices::Endpoint::FilePair(Arc::new(output), Arc::new(pipe_rx));

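    // Create the virtio-console device under test (iommu disabled, seccomp
    // action set to Allow).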
    let (mut console, _) = virtio_devices::Console::new(
        "fuzzer_console".to_owned(),
        endpoint,
        None,  // resize_pipe
        false, // iommu
        SeccompAction::Allow,
        EventFd::new(EFD_NONBLOCK).unwrap(),
        None,
    )
    .unwrap();

    let console_input_bytes = &bytes[..CONSOLE_INPUT_SIZE];
    let queue_data = &bytes[CONSOLE_INPUT_SIZE..CONSOLE_INPUT_SIZE + QUEUE_DATA_SIZE * QUEUE_NUM];
    let queue_bytes = &bytes[CONSOLE_INPUT_SIZE + QUEUE_DATA_SIZE * QUEUE_NUM
        ..CONSOLE_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM];
    let mem_bytes = &bytes[CONSOLE_INPUT_SIZE + (QUEUE_DATA_SIZE + QUEUE_BYTES_SIZE) * QUEUE_NUM..];

    // Set up the virt queues with the input bytes
    let mut queues = setup_virt_queues(
        &[
            &queue_data[..QUEUE_DATA_SIZE].try_into().unwrap(),
            &queue_data[QUEUE_DATA_SIZE..QUEUE_DATA_SIZE * 2]
                .try_into()
                .unwrap(),
        ],
        BASE_VIRT_QUEUE_ADDR,
    );

    // Set up the guest memory with the input bytes
    let mem = GuestMemoryMmap::from_ranges(&[
        (GuestAddress(0), MEM_SIZE),
        (GuestAddress(BASE_VIRT_QUEUE_ADDR), queue_bytes.len()),
    ])
    .unwrap();
    if mem
        .write_slice(queue_bytes, GuestAddress(BASE_VIRT_QUEUE_ADDR))
        .is_err()
    {
        return Corpus::Reject;
    }
    if mem.write_slice(mem_bytes, GuestAddress(0)).is_err() {
        return Corpus::Reject;
    }
    let guest_memory = GuestMemoryAtomic::new(mem);

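    // Pair each queue with two handles onto the same eventfd: the dup'd fd is
    // written below to pre-kick the queue, while the original is handed to the
    // device on activation.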
    let input_queue = queues.remove(0);
    let input_evt = EventFd::new(0).unwrap();
    let input_queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(input_evt.as_raw_fd())) };
    let output_queue = queues.remove(0);
    let output_evt = EventFd::new(0).unwrap();
    let output_queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(output_evt.as_raw_fd())) };

    // Kick the 'queue' events and the endpoint event before activating the console device
    input_queue_evt.write(1).unwrap();
    output_queue_evt.write(1).unwrap();
    pipe_tx.write_all(console_input_bytes).unwrap(); // Feed the fuzzed data as console input

    console
        .activate(
            guest_memory,
            Arc::new(NoopVirtioInterrupt {}),
            vec![(0, input_queue, input_evt), (1, output_queue, output_evt)],
        )
        .unwrap();

    // Wait for the events to finish and the console device worker thread to return
    console.wait_for_epoll_threads();

    Corpus::Keep
});

pub struct NoopVirtioInterrupt {}

impl VirtioInterrupt for NoopVirtioInterrupt {
    fn trigger(&self, _int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
        Ok(())
    }
}

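// Build `QUEUE_NUM` virtio queues whose descriptor table, available ring and
// used ring live in the fuzz-controlled region starting at `base_addr`, with
// per-queue state (next_avail, next_used, event_idx, size) taken from the
// fuzz-provided bytes.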
fn setup_virt_queues(bytes: &[&[u8; QUEUE_DATA_SIZE]], base_addr: u64) -> Vec<Queue> {
    let mut queues = Vec::new();
    for (i, b) in bytes.iter().enumerate() {
        let mut q = Queue::new(QUEUE_SIZE).unwrap();

        let desc_table_addr = base_addr + (QUEUE_BYTES_SIZE * i) as u64;
        let avail_ring_addr = desc_table_addr + DESC_TABLE_SIZE;
        let used_ring_addr = avail_ring_addr + PADDING_SIZE + AVAIL_RING_SIZE;
        q.try_set_desc_table_address(GuestAddress(desc_table_addr))
            .unwrap();
        q.try_set_avail_ring_address(GuestAddress(avail_ring_addr))
            .unwrap();
        q.try_set_used_ring_address(GuestAddress(used_ring_addr))
            .unwrap();

        q.set_next_avail(b[0] as u16); // A 'u8' is enough given that 'QUEUE_SIZE' is small
        q.set_next_used(b[1] as u16);
        q.set_event_idx(b[2] % 2 != 0);
        q.set_size(b[3] as u16 % QUEUE_SIZE);

        q.set_ready(true);
        queues.push(q);
    }

    queues
}

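// Wrapper around the raw `memfd_create(2)` syscall; the resulting anonymous,
// memory-backed file is used as the console output sink.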
fn memfd_create(name: &std::ffi::CStr) -> Result<RawFd, std::io::Error> {
    let res = unsafe { libc::syscall(libc::SYS_memfd_create, name.as_ptr(), 0) };

    if res < 0 {
        Err(std::io::Error::last_os_error())
    } else {
        Ok(res as RawFd)
    }
}

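// Create an anonymous pipe; the read end serves as the console input file and
// the write end is used by the fuzzer to push the fuzzed input bytes.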
fn create_pipe() -> Result<(File, File), std::io::Error> {
    let mut pipe = [-1; 2];
    if unsafe { libc::pipe2(pipe.as_mut_ptr(), libc::O_CLOEXEC) } == -1 {
        return Err(std::io::Error::last_os_error());
    }
    let rx = unsafe { File::from_raw_fd(pipe[0]) };
    let tx = unsafe { File::from_raw_fd(pipe[1]) };

    Ok((rx, tx))
}