xref: /cloud-hypervisor/fuzz/fuzz_targets/watchdog.rs (revision eeae63b4595fbf0cc69f62b6e9d9a79c543c4ac7)
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0

#![no_main]
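
// Fuzz target for the virtio-watchdog device: the first QUEUE_DATA_SIZE bytes
// of each input seed the virtqueue state, and the remaining bytes are written
// into guest memory before the device is activated with a pre-kicked queue
// event. Presumably run through cargo-fuzz (e.g. `cargo fuzz run watchdog`),
// assuming the repository's usual fuzz workspace setup.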

use std::os::unix::io::{AsRawFd, FromRawFd};
use std::sync::Arc;

use libfuzzer_sys::{fuzz_target, Corpus};
use seccompiler::SeccompAction;
use virtio_devices::{VirtioDevice, VirtioInterrupt, VirtioInterruptType};
use virtio_queue::{Queue, QueueT};
use vm_memory::bitmap::AtomicBitmap;
use vm_memory::{Bytes, GuestAddress, GuestMemoryAtomic};
use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK};

type GuestMemoryMmap = vm_memory::GuestMemoryMmap<AtomicBitmap>;

// Number of input bytes used to seed the virtqueue state.
const QUEUE_DATA_SIZE: usize = 4;
const MEM_SIZE: usize = 256 * 1024 * 1024;
// Max entries in the queue.
const QUEUE_SIZE: u16 = 256;
// Guest physical address for the descriptor table.
const DESC_TABLE_ADDR: u64 = 0;
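// Each descriptor in a virtio split queue is 16 bytes (per the virtio 1.x spec).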
const DESC_TABLE_SIZE: u64 = 16_u64 * QUEUE_SIZE as u64;
// Guest physical address for the available ring.
const AVAIL_RING_ADDR: u64 = DESC_TABLE_ADDR + DESC_TABLE_SIZE;
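// Available ring layout: flags (2 bytes) + idx (2 bytes) + one u16 entry per
// descriptor, plus the trailing used_event field (2 bytes), hence 6 + 2 * size.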
const AVAIL_RING_SIZE: u64 = 6_u64 + 2 * QUEUE_SIZE as u64;
// Guest physical address for the used ring (must be 4-byte aligned).
const USED_RING_ADDR: u64 = (AVAIL_RING_ADDR + AVAIL_RING_SIZE + 3) & !3_u64;

fuzz_target!(|bytes: &[u8]| -> Corpus {
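    // Inputs too short to seed the queue state, or too large to fit in guest
    // memory, are rejected so libFuzzer drops them from the corpus.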
    if bytes.len() < QUEUE_DATA_SIZE || bytes.len() > (QUEUE_DATA_SIZE + MEM_SIZE) {
        return Corpus::Reject;
    }

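    // Create the device under test. `SeccompAction::Allow` leaves the worker
    // thread's syscalls unfiltered, so the fuzzer is not killed by a seccomp trap.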
    let mut watchdog = virtio_devices::Watchdog::new(
        "fuzzer_watchdog".to_owned(),
        EventFd::new(EFD_NONBLOCK).unwrap(),
        SeccompAction::Allow,
        EventFd::new(EFD_NONBLOCK).unwrap(),
        None,
    )
    .unwrap();

    let queue_data = &bytes[..QUEUE_DATA_SIZE];
    let mem_bytes = &bytes[QUEUE_DATA_SIZE..];

    // Set up the virtqueue with the input bytes
    let q = setup_virt_queue(queue_data.try_into().unwrap());

    // Set up the guest memory with the input bytes
    let mem = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), MEM_SIZE)]).unwrap();
    if mem.write_slice(mem_bytes, GuestAddress(0)).is_err() {
        return Corpus::Reject;
    }
    let guest_memory = GuestMemoryAtomic::new(mem);

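    // Duplicate the eventfd: `evt` is handed to the device while `queue_evt`
    // stays with the fuzzer, so the queue can be kicked before activation.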
    let evt = EventFd::new(0).unwrap();
    let queue_evt = unsafe { EventFd::from_raw_fd(libc::dup(evt.as_raw_fd())) };

    // Kick the 'queue' event before activating the watchdog device
    queue_evt.write(1).unwrap();

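    // Activation spawns the device's epoll worker thread, which services the
    // already-kicked queue; errors are ignored since any outcome short of a
    // crash or hang is acceptable here.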
    watchdog
        .activate(
            guest_memory,
            Arc::new(NoopVirtioInterrupt {}),
            vec![(0, q, evt)],
        )
        .ok();

    // Wait for the events to finish and the watchdog device worker thread to return
    watchdog.wait_for_epoll_threads();

    Corpus::Keep
});

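// An interrupt handler that discards every notification: the fuzzer only
// exercises the device's request path, so no interrupt needs to reach a guest.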
pub struct NoopVirtioInterrupt {}

impl VirtioInterrupt for NoopVirtioInterrupt {
    fn trigger(&self, _int_type: VirtioInterruptType) -> std::result::Result<(), std::io::Error> {
        Ok(())
    }
}

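// Build a queue whose mutable state (next_avail, next_used, event_idx, size)
// comes directly from the four fuzzer-controlled input bytes.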
fn setup_virt_queue(bytes: &[u8; QUEUE_DATA_SIZE]) -> Queue {
    let mut q = Queue::new(QUEUE_SIZE).unwrap();
    q.set_next_avail(bytes[0] as u16); // 'u8' is enough given that 'QUEUE_SIZE' is small
    q.set_next_used(bytes[1] as u16);
    q.set_event_idx(bytes[2] % 2 != 0);
    q.set_size(bytes[3] as u16 % QUEUE_SIZE);

    q.try_set_desc_table_address(GuestAddress(DESC_TABLE_ADDR))
        .unwrap();
    q.try_set_avail_ring_address(GuestAddress(AVAIL_RING_ADDR))
        .unwrap();
    q.try_set_used_ring_address(GuestAddress(USED_RING_ADDR))
        .unwrap();
    q.set_ready(true);

    q
}