// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.

use std::collections::HashMap;
use std::net::IpAddr;
use std::num::Wrapping;
use std::ops::Deref;
use std::os::unix::io::{AsRawFd, RawFd};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Barrier};
use std::{result, thread};

use anyhow::anyhow;
#[cfg(not(fuzzing))]
use net_util::virtio_features_to_tap_offload;
use net_util::{
    build_net_config_space, build_net_config_space_with_mq, open_tap, CtrlQueue, MacAddr,
    NetCounters, NetQueuePair, OpenTapError, RxVirtio, Tap, TapError, TxVirtio, VirtioNetConfig,
};
use seccompiler::SeccompAction;
use serde::{Deserialize, Serialize};
use thiserror::Error;
use virtio_bindings::virtio_config::*;
use virtio_bindings::virtio_net::*;
use virtio_bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use virtio_queue::{Queue, QueueT};
use vm_memory::{ByteValued, GuestAddressSpace, GuestMemoryAtomic};
use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vm_virtio::AccessPlatform;
use vmm_sys_util::eventfd::EventFd;

use super::{
    ActivateError, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler,
    Error as DeviceError, RateLimiterConfig, VirtioCommon, VirtioDevice, VirtioDeviceType,
    VirtioInterruptType, EPOLL_HELPER_EVENT_LAST,
};
use crate::seccomp_filters::Thread;
use crate::thread_helper::spawn_virtio_thread;
use crate::{GuestMemoryMmap, VirtioInterrupt};

/// Control queue
// Event available on the control queue.
const CTRL_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;

// Following the VIRTIO specification, the MTU should be at least 1280.
pub const MIN_MTU: u16 = 1280;

pub struct NetCtrlEpollHandler {
    pub mem: GuestMemoryAtomic<GuestMemoryMmap>,
    pub kill_evt: EventFd,
    pub pause_evt: EventFd,
    pub ctrl_q: CtrlQueue,
    pub queue_evt: EventFd,
    pub queue: Queue,
    pub access_platform: Option<Arc<dyn AccessPlatform>>,
    pub interrupt_cb: Arc<dyn VirtioInterrupt>,
    pub queue_index: u16,
}

impl NetCtrlEpollHandler {
    fn signal_used_queue(&self, queue_index: u16) -> result::Result<(), DeviceError> {
        self.interrupt_cb
            .trigger(VirtioInterruptType::Queue(queue_index))
            .map_err(|e| {
                error!("Failed to signal used queue: {:?}", e);
                DeviceError::FailedSignalingUsedQueue(e)
            })
    }

    pub fn run_ctrl(
        &mut self,
        paused: Arc<AtomicBool>,
        paused_sync: Arc<Barrier>,
    ) -> std::result::Result<(), EpollHelperError> {
        let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?;
        helper.add_event(self.queue_evt.as_raw_fd(), CTRL_QUEUE_EVENT)?;
        helper.run(paused, paused_sync, self)?;

        Ok(())
    }
}

impl EpollHelperHandler for NetCtrlEpollHandler {
    fn handle_event(
        &mut self,
        _helper: &mut EpollHelper,
        event: &epoll::Event,
    ) -> result::Result<(), EpollHelperError> {
        let ev_type = event.data as u16;
        match ev_type {
            CTRL_QUEUE_EVENT => {
                let mem = self.mem.memory();
                self.queue_evt.read().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!(
                        "Failed to get control queue event: {:?}",
                        e
                    ))
                })?;
                self.ctrl_q
                    .process(mem.deref(), &mut self.queue, self.access_platform.as_ref())
                    .map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!(
                            "Failed to process control queue: {:?}",
                            e
                        ))
                    })?;
                match self.queue.needs_notification(mem.deref()) {
                    Ok(true) => {
                        self.signal_used_queue(self.queue_index).map_err(|e| {
                            EpollHelperError::HandleEvent(anyhow!(
                                "Error signalling that control queue was used: {:?}",
                                e
                            ))
                        })?;
                    }
                    Ok(false) => {}
                    Err(e) => {
                        return Err(EpollHelperError::HandleEvent(anyhow!(
                            "Error getting notification state of control queue: {}",
                            e
                        )));
                    }
                };
            }
            _ => {
                return Err(EpollHelperError::HandleEvent(anyhow!(
                    "Unknown event for virtio-net control queue"
                )));
            }
        }

        Ok(())
    }
}

/// Rx/Tx queue pair
// The guest has made a buffer available to receive a frame into.
pub const RX_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
// The transmit queue has a frame that is ready to send from the guest.
pub const TX_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 2;
// A frame is available for reading from the tap device to receive in the guest.
pub const RX_TAP_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 3;
// The TAP can be written to. Used after an EAGAIN error to retry TX.
pub const TX_TAP_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 4;
// New 'wake up' event from the rx rate limiter
pub const RX_RATE_LIMITER_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 5;
// New 'wake up' event from the tx rate limiter
pub const TX_RATE_LIMITER_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 6;

#[derive(Error, Debug)]
pub enum Error {
    #[error("Failed to open taps: {0}")]
    OpenTap(OpenTapError),
    #[error("Using existing tap: {0}")]
    TapError(TapError),
    #[error("Error calling dup() on tap fd: {0}")]
    DuplicateTapFd(std::io::Error),
}

pub type Result<T> = result::Result<T, Error>;

struct NetEpollHandler {
    net: NetQueuePair,
    mem: GuestMemoryAtomic<GuestMemoryMmap>,
    interrupt_cb: Arc<dyn VirtioInterrupt>,
    kill_evt: EventFd,
    pause_evt: EventFd,
    queue_index_base: u16,
    queue_pair: (Queue, Queue),
    queue_evt_pair: (EventFd, EventFd),
    // Always generate interrupts until the driver has signalled to the device.
    // This mitigates a problem with interrupts from tap events being "lost" upon
    // a restore as the vCPU thread isn't ready to handle the interrupt. This causes
    // issues when combined with VIRTIO_RING_F_EVENT_IDX interrupt suppression.
    driver_awake: bool,
}

impl NetEpollHandler {
    fn signal_used_queue(&self, queue_index: u16) -> result::Result<(), DeviceError> {
        self.interrupt_cb
            .trigger(VirtioInterruptType::Queue(queue_index))
            .map_err(|e| {
                error!("Failed to signal used queue: {:?}", e);
                DeviceError::FailedSignalingUsedQueue(e)
            })
    }

    fn handle_rx_event(&mut self) -> result::Result<(), DeviceError> {
        let queue_evt = &self.queue_evt_pair.0;
        if let Err(e) = queue_evt.read() {
            error!("Failed to get rx queue event: {:?}", e);
        }

        self.net.rx_desc_avail = true;

        let rate_limit_reached = self
            .net
            .rx_rate_limiter
            .as_ref()
            .is_some_and(|r| r.is_blocked());

        // Start to listen on RX_TAP_EVENT only when the rate limit is not reached
        if !self.net.rx_tap_listening && !rate_limit_reached {
            net_util::register_listener(
                self.net.epoll_fd.unwrap(),
                self.net.tap.as_raw_fd(),
                epoll::Events::EPOLLIN,
                u64::from(self.net.tap_rx_event_id),
            )
            .map_err(DeviceError::IoError)?;
            self.net.rx_tap_listening = true;
        }

        Ok(())
    }

    fn process_tx(&mut self) -> result::Result<(), DeviceError> {
        if self
            .net
            .process_tx(&self.mem.memory(), &mut self.queue_pair.1)
            .map_err(DeviceError::NetQueuePair)?
            || !self.driver_awake
        {
            self.signal_used_queue(self.queue_index_base + 1)?;
            debug!("Signalling TX queue");
        } else {
            debug!("Not signalling TX queue");
        }
        Ok(())
    }

    fn handle_tx_event(&mut self) -> result::Result<(), DeviceError> {
        let rate_limit_reached = self
            .net
            .tx_rate_limiter
            .as_ref()
            .is_some_and(|r| r.is_blocked());

        if !rate_limit_reached {
            self.process_tx()?;
        }

        Ok(())
    }

    fn handle_rx_tap_event(&mut self) -> result::Result<(), DeviceError> {
        if self
            .net
            .process_rx(&self.mem.memory(), &mut self.queue_pair.0)
            .map_err(DeviceError::NetQueuePair)?
            || !self.driver_awake
        {
            self.signal_used_queue(self.queue_index_base)?;
            debug!("Signalling RX queue");
        } else {
            debug!("Not signalling RX queue");
        }
        Ok(())
    }

    fn run(
        &mut self,
        paused: Arc<AtomicBool>,
        paused_sync: Arc<Barrier>,
    ) -> result::Result<(), EpollHelperError> {
        let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?;
        helper.add_event(self.queue_evt_pair.0.as_raw_fd(), RX_QUEUE_EVENT)?;
        helper.add_event(self.queue_evt_pair.1.as_raw_fd(), TX_QUEUE_EVENT)?;
        if let Some(rate_limiter) = &self.net.rx_rate_limiter {
            helper.add_event(rate_limiter.as_raw_fd(), RX_RATE_LIMITER_EVENT)?;
        }
        if let Some(rate_limiter) = &self.net.tx_rate_limiter {
            helper.add_event(rate_limiter.as_raw_fd(), TX_RATE_LIMITER_EVENT)?;
        }

        let mem = self.mem.memory();
        // If there are some already available descriptors on the RX queue,
        // then we can start the thread while listening onto the TAP.
        if self
            .queue_pair
            .0
            .used_idx(mem.deref(), Ordering::Acquire)
            .map_err(EpollHelperError::QueueRingIndex)?
            < self
                .queue_pair
                .0
                .avail_idx(mem.deref(), Ordering::Acquire)
                .map_err(EpollHelperError::QueueRingIndex)?
        {
            helper.add_event(self.net.tap.as_raw_fd(), RX_TAP_EVENT)?;
            self.net.rx_tap_listening = true;
            info!("Listener registered at start");
        }

        // The NetQueuePair needs the epoll fd.
        self.net.epoll_fd = Some(helper.as_raw_fd());

        helper.run(paused, paused_sync, self)?;

        Ok(())
    }
}

impl EpollHelperHandler for NetEpollHandler {
    fn handle_event(
        &mut self,
        _helper: &mut EpollHelper,
        event: &epoll::Event,
    ) -> result::Result<(), EpollHelperError> {
        let ev_type = event.data as u16;
        match ev_type {
            RX_QUEUE_EVENT => {
                self.driver_awake = true;
                self.handle_rx_event().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Error processing RX queue: {:?}", e))
                })?;
            }
            TX_QUEUE_EVENT => {
                let queue_evt = &self.queue_evt_pair.1;
                if let Err(e) = queue_evt.read() {
                    error!("Failed to get tx queue event: {:?}", e);
                }
                self.driver_awake = true;
                self.handle_tx_event().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Error processing TX queue: {:?}", e))
                })?;
            }
            TX_TAP_EVENT => {
                self.handle_tx_event().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!(
                        "Error processing TX queue (TAP event): {:?}",
                        e
                    ))
                })?;
            }
            RX_TAP_EVENT => {
                self.handle_rx_tap_event().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!("Error processing tap queue: {:?}", e))
                })?;
            }
            RX_RATE_LIMITER_EVENT => {
                if let Some(rate_limiter) = &mut self.net.rx_rate_limiter {
                    // Upon rate limiter event, call the rate limiter handler and register the
                    // TAP fd for further processing if some RX buffers are available
                    rate_limiter.event_handler().map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!(
                            "Error from 'rate_limiter.event_handler()': {:?}",
                            e
                        ))
                    })?;

                    if !self.net.rx_tap_listening && self.net.rx_desc_avail {
                        net_util::register_listener(
                            self.net.epoll_fd.unwrap(),
                            self.net.tap.as_raw_fd(),
                            epoll::Events::EPOLLIN,
                            u64::from(self.net.tap_rx_event_id),
                        )
                        .map_err(|e| {
                            EpollHelperError::HandleEvent(anyhow!(
                                "Error register_listener with `RX_RATE_LIMITER_EVENT`: {:?}",
                                e
                            ))
                        })?;

                        self.net.rx_tap_listening = true;
                    }
                } else {
                    return Err(EpollHelperError::HandleEvent(anyhow!(
                        "Unexpected RX_RATE_LIMITER_EVENT"
                    )));
                }
            }
            TX_RATE_LIMITER_EVENT => {
                if let Some(rate_limiter) = &mut self.net.tx_rate_limiter {
                    // Upon rate limiter event, call the rate limiter handler
                    // and restart processing the queue.
                    rate_limiter.event_handler().map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!(
                            "Error from 'rate_limiter.event_handler()': {:?}",
                            e
                        ))
                    })?;

                    self.driver_awake = true;
                    self.process_tx().map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!("Error processing TX queue: {:?}", e))
                    })?;
                } else {
                    return Err(EpollHelperError::HandleEvent(anyhow!(
                        "Unexpected TX_RATE_LIMITER_EVENT"
                    )));
                }
            }
            _ => {
                return Err(EpollHelperError::HandleEvent(anyhow!(
                    "Unexpected event: {}",
                    ev_type
                )));
            }
        }
        Ok(())
    }
}

pub struct Net {
    common: VirtioCommon,
    id: String,
    taps: Vec<Tap>,
    config: VirtioNetConfig,
    ctrl_queue_epoll_thread: Option<thread::JoinHandle<()>>,
    counters: NetCounters,
    seccomp_action: SeccompAction,
    rate_limiter_config: Option<RateLimiterConfig>,
    exit_evt: EventFd,
}

#[derive(Serialize, Deserialize)]
pub struct NetState {
    pub avail_features: u64,
    pub acked_features: u64,
    pub config: VirtioNetConfig,
    pub queue_size: Vec<u16>,
}

impl Net {
    /// Create a new virtio network device with the given TAP interface.
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_tap(
        id: String,
        taps: Vec<Tap>,
        guest_mac: Option<MacAddr>,
        iommu: bool,
        num_queues: usize,
        queue_size: u16,
        seccomp_action: SeccompAction,
        rate_limiter_config: Option<RateLimiterConfig>,
        exit_evt: EventFd,
        state: Option<NetState>,
        offload_tso: bool,
        offload_ufo: bool,
        offload_csum: bool,
    ) -> Result<Self> {
        assert!(!taps.is_empty());

        let mtu = taps[0].mtu().map_err(Error::TapError)? as u16;

        let (avail_features, acked_features, config, queue_sizes, paused) = if let Some(state) =
            state
        {
            info!("Restoring virtio-net {}", id);
            (
                state.avail_features,
                state.acked_features,
                state.config,
                state.queue_size,
                true,
            )
        } else {
            let mut avail_features = (1 << VIRTIO_NET_F_MTU)
                | (1 << VIRTIO_RING_F_EVENT_IDX)
                | (1 << VIRTIO_F_VERSION_1);

            if iommu {
                avail_features |= 1u64 << VIRTIO_F_IOMMU_PLATFORM;
            }

            // Configure TSO/UFO features when hardware checksum offload is enabled.
            if offload_csum {
                avail_features |= (1 << VIRTIO_NET_F_CSUM)
                    | (1 << VIRTIO_NET_F_GUEST_CSUM)
                    | (1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);

                if offload_tso {
                    avail_features |= (1 << VIRTIO_NET_F_HOST_ECN)
                        | (1 << VIRTIO_NET_F_HOST_TSO4)
                        | (1 << VIRTIO_NET_F_HOST_TSO6)
                        | (1 << VIRTIO_NET_F_GUEST_ECN)
                        | (1 << VIRTIO_NET_F_GUEST_TSO4)
                        | (1 << VIRTIO_NET_F_GUEST_TSO6);
                }

                if offload_ufo {
                    avail_features |= (1 << VIRTIO_NET_F_HOST_UFO) | (1 << VIRTIO_NET_F_GUEST_UFO);
                }
            }

            avail_features |= 1 << VIRTIO_NET_F_CTRL_VQ;
            let queue_num = num_queues + 1;

            let mut config = VirtioNetConfig::default();
            if let Some(mac) = guest_mac {
                build_net_config_space(
                    &mut config,
                    mac,
                    num_queues,
                    Some(mtu),
                    &mut avail_features,
                );
            } else {
                build_net_config_space_with_mq(
                    &mut config,
                    num_queues,
                    Some(mtu),
                    &mut avail_features,
                );
            }

            (
                avail_features,
                0,
                config,
                vec![queue_size; queue_num],
                false,
            )
        };

        Ok(Net {
            common: VirtioCommon {
                device_type: VirtioDeviceType::Net as u32,
                avail_features,
                acked_features,
                queue_sizes,
                paused_sync: Some(Arc::new(Barrier::new((num_queues / 2) + 1))),
                min_queues: 2,
                paused: Arc::new(AtomicBool::new(paused)),
                ..Default::default()
            },
            id,
            taps,
            config,
            ctrl_queue_epoll_thread: None,
            counters: NetCounters::default(),
            seccomp_action,
            rate_limiter_config,
            exit_evt,
        })
    }

    /// Create a new virtio network device with the given IP address and
    /// netmask.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        id: String,
        if_name: Option<&str>,
        ip_addr: Option<IpAddr>,
        netmask: Option<IpAddr>,
        guest_mac: Option<MacAddr>,
        host_mac: &mut Option<MacAddr>,
        mtu: Option<u16>,
        iommu: bool,
        num_queues: usize,
        queue_size: u16,
        seccomp_action: SeccompAction,
        rate_limiter_config: Option<RateLimiterConfig>,
        exit_evt: EventFd,
        state: Option<NetState>,
        offload_tso: bool,
        offload_ufo: bool,
        offload_csum: bool,
    ) -> Result<Self> {
        let taps = open_tap(
            if_name,
            ip_addr,
            netmask,
            host_mac,
            mtu,
            num_queues / 2,
            None,
        )
        .map_err(Error::OpenTap)?;

        Self::new_with_tap(
            id,
            taps,
            guest_mac,
            iommu,
            num_queues,
            queue_size,
            seccomp_action,
            rate_limiter_config,
            exit_evt,
            state,
            offload_tso,
            offload_ufo,
            offload_csum,
        )
    }

    #[allow(clippy::too_many_arguments)]
    pub fn from_tap_fds(
        id: String,
        fds: &[RawFd],
        guest_mac: Option<MacAddr>,
        mtu: Option<u16>,
        iommu: bool,
        queue_size: u16,
        seccomp_action: SeccompAction,
        rate_limiter_config: Option<RateLimiterConfig>,
        exit_evt: EventFd,
        state: Option<NetState>,
        offload_tso: bool,
        offload_ufo: bool,
        offload_csum: bool,
    ) -> Result<Self> {
        let mut taps: Vec<Tap> = Vec::new();
        let num_queue_pairs = fds.len();

        for fd in fds.iter() {
            // Duplicate so that it can survive reboots
            // SAFETY: FFI call to dup. Trivially safe.
            let fd = unsafe { libc::dup(*fd) };
            if fd < 0 {
                return Err(Error::DuplicateTapFd(std::io::Error::last_os_error()));
            }
            let tap = Tap::from_tap_fd(fd, num_queue_pairs).map_err(Error::TapError)?;
            taps.push(tap);
        }

        assert!(!taps.is_empty());

        if let Some(mtu) = mtu {
            taps[0].set_mtu(mtu as i32).map_err(Error::TapError)?;
        }

        Self::new_with_tap(
            id,
            taps,
            guest_mac,
            iommu,
            num_queue_pairs * 2,
            queue_size,
            seccomp_action,
            rate_limiter_config,
            exit_evt,
            state,
            offload_tso,
            offload_ufo,
            offload_csum,
        )
    }

    fn state(&self) -> NetState {
        NetState {
            avail_features: self.common.avail_features,
            acked_features: self.common.acked_features,
            config: self.config,
            queue_size: self.common.queue_sizes.clone(),
        }
    }

    #[cfg(fuzzing)]
    pub fn wait_for_epoll_threads(&mut self) {
        self.common.wait_for_epoll_threads();
    }
}

impl Drop for Net {
    fn drop(&mut self) {
        if let Some(kill_evt) = self.common.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }
        // Needed to ensure all references to tap FDs are dropped (#4868)
        self.common.wait_for_epoll_threads();
        if let Some(thread) = self.ctrl_queue_epoll_thread.take() {
            if let Err(e) = thread.join() {
                error!("Error joining thread: {:?}", e);
            }
        }
    }
}

impl VirtioDevice for Net {
    fn device_type(&self) -> u32 {
        self.common.device_type
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.common.queue_sizes
    }

    fn features(&self) -> u64 {
        self.common.avail_features
    }

    fn ack_features(&mut self, value: u64) {
        self.common.ack_features(value)
    }

    fn read_config(&self, offset: u64, data: &mut [u8]) {
        self.read_config_from_slice(self.config.as_slice(), offset, data);
    }

    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_cb: Arc<dyn VirtioInterrupt>,
        mut queues: Vec<(usize, Queue, EventFd)>,
    ) -> ActivateResult {
        self.common.activate(&queues, &interrupt_cb)?;

        let num_queues = queues.len();
        let event_idx = self.common.feature_acked(VIRTIO_RING_F_EVENT_IDX.into());
        if self.common.feature_acked(VIRTIO_NET_F_CTRL_VQ.into()) && num_queues % 2 != 0 {
            let ctrl_queue_index = num_queues - 1;
            let (_, mut ctrl_queue, ctrl_queue_evt) = queues.remove(ctrl_queue_index);

            ctrl_queue.set_event_idx(event_idx);

            let (kill_evt, pause_evt) = self.common.dup_eventfds();
            let mut ctrl_handler = NetCtrlEpollHandler {
                mem: mem.clone(),
                kill_evt,
                pause_evt,
                ctrl_q: CtrlQueue::new(self.taps.clone()),
                queue: ctrl_queue,
                queue_evt: ctrl_queue_evt,
                access_platform: self.common.access_platform.clone(),
                queue_index: ctrl_queue_index as u16,
                interrupt_cb: interrupt_cb.clone(),
            };

            let paused = self.common.paused.clone();
            // Let's update the barrier as we need 1 for each RX/TX pair +
            // 1 for the control queue + 1 for the main thread signalling
            // the pause.
            self.common.paused_sync = Some(Arc::new(Barrier::new(self.taps.len() + 2)));
            let paused_sync = self.common.paused_sync.clone();

            let mut epoll_threads = Vec::new();
            spawn_virtio_thread(
                &format!("{}_ctrl", &self.id),
                &self.seccomp_action,
                Thread::VirtioNetCtl,
                &mut epoll_threads,
                &self.exit_evt,
                move || ctrl_handler.run_ctrl(paused, paused_sync.unwrap()),
            )?;
            self.ctrl_queue_epoll_thread = Some(epoll_threads.remove(0));
        }

        let mut epoll_threads = Vec::new();
        let mut taps = self.taps.clone();
        for i in 0..queues.len() / 2 {
            let rx = RxVirtio::new();
            let tx = TxVirtio::new();
            let rx_tap_listening = false;

            let (_, queue_0, queue_evt_0) = queues.remove(0);
            let (_, queue_1, queue_evt_1) = queues.remove(0);
            let mut queue_pair = (queue_0, queue_1);
            queue_pair.0.set_event_idx(event_idx);
            queue_pair.1.set_event_idx(event_idx);

            let queue_evt_pair = (queue_evt_0, queue_evt_1);

            let (kill_evt, pause_evt) = self.common.dup_eventfds();

            let rx_rate_limiter: Option<rate_limiter::RateLimiter> = self
                .rate_limiter_config
                .map(RateLimiterConfig::try_into)
                .transpose()
                .map_err(ActivateError::CreateRateLimiter)?;

            let tx_rate_limiter: Option<rate_limiter::RateLimiter> = self
                .rate_limiter_config
                .map(RateLimiterConfig::try_into)
                .transpose()
                .map_err(ActivateError::CreateRateLimiter)?;

            let tap = taps.remove(0);
            #[cfg(not(fuzzing))]
            tap.set_offload(virtio_features_to_tap_offload(self.common.acked_features))
                .map_err(|e| {
                    error!("Error programming tap offload: {:?}", e);
                    ActivateError::BadActivate
                })?;

            let mut handler = NetEpollHandler {
                net: NetQueuePair {
                    tap_for_write_epoll: tap.clone(),
                    tap,
                    rx,
                    tx,
                    epoll_fd: None,
                    rx_tap_listening,
                    tx_tap_listening: false,
                    counters: self.counters.clone(),
                    tap_rx_event_id: RX_TAP_EVENT,
                    tap_tx_event_id: TX_TAP_EVENT,
                    rx_desc_avail: false,
                    rx_rate_limiter,
                    tx_rate_limiter,
                    access_platform: self.common.access_platform.clone(),
                },
                mem: mem.clone(),
                queue_index_base: (i * 2) as u16,
                queue_pair,
                queue_evt_pair,
                interrupt_cb: interrupt_cb.clone(),
                kill_evt,
                pause_evt,
                driver_awake: false,
            };

            let paused = self.common.paused.clone();
            let paused_sync = self.common.paused_sync.clone();

            spawn_virtio_thread(
                &format!("{}_qp{}", self.id.clone(), i),
                &self.seccomp_action,
                Thread::VirtioNet,
                &mut epoll_threads,
                &self.exit_evt,
                move || handler.run(paused, paused_sync.unwrap()),
            )?;
        }

        self.common.epoll_threads = Some(epoll_threads);

        event!("virtio-device", "activated", "id", &self.id);
        Ok(())
    }

    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        let result = self.common.reset();
        event!("virtio-device", "reset", "id", &self.id);
        result
    }

    fn counters(&self) -> Option<HashMap<&'static str, Wrapping<u64>>> {
        let mut counters = HashMap::new();

        counters.insert(
            "rx_bytes",
            Wrapping(self.counters.rx_bytes.load(Ordering::Acquire)),
        );
        counters.insert(
            "rx_frames",
            Wrapping(self.counters.rx_frames.load(Ordering::Acquire)),
        );
        counters.insert(
            "tx_bytes",
            Wrapping(self.counters.tx_bytes.load(Ordering::Acquire)),
        );
        counters.insert(
            "tx_frames",
            Wrapping(self.counters.tx_frames.load(Ordering::Acquire)),
        );

        Some(counters)
    }

    fn set_access_platform(&mut self, access_platform: Arc<dyn AccessPlatform>) {
        self.common.set_access_platform(access_platform)
    }
}

impl Pausable for Net {
    fn pause(&mut self) -> result::Result<(), MigratableError> {
        self.common.pause()
    }

    fn resume(&mut self) -> result::Result<(), MigratableError> {
        self.common.resume()?;

        if let Some(ctrl_queue_epoll_thread) = &self.ctrl_queue_epoll_thread {
            ctrl_queue_epoll_thread.thread().unpark();
        }
        Ok(())
    }
}

impl Snapshottable for Net {
    fn id(&self) -> String {
        self.id.clone()
    }

    fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
        Snapshot::new_from_state(&self.state())
    }
}
impl Transportable for Net {}
impl Migratable for Net {}
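
// The module below is not part of the original file: it is a minimal, hedged
// sketch of how `Net::new()` could be exercised, assuming the host can create
// TAP devices (needs /dev/net/tun and CAP_NET_ADMIN), which is why the test is
// marked `#[ignore]`. The "net0" id and the queue parameters are illustrative
// values only, not defaults mandated by this crate.
#[cfg(test)]
mod tests {
    use seccompiler::SeccompAction;
    use vmm_sys_util::eventfd::EventFd;

    use super::*;

    #[test]
    #[ignore] // Requires privileges to create a TAP interface.
    fn net_new_advertises_mandatory_features() {
        let exit_evt = EventFd::new(libc::EFD_NONBLOCK).unwrap();

        // One RX/TX queue pair, no rate limiting, no restored state.
        let net = Net::new(
            "net0".to_string(),
            None,      // if_name: let the kernel allocate one
            None,      // ip_addr
            None,      // netmask
            None,      // guest_mac
            &mut None, // host_mac
            None,      // mtu
            false,     // iommu
            2,         // num_queues: one RX/TX pair
            256,       // queue_size
            SeccompAction::Allow,
            None, // rate_limiter_config
            exit_evt,
            None, // state
            true, // offload_tso
            true, // offload_ufo
            true, // offload_csum
        )
        .unwrap();

        // A freshly created device should offer VIRTIO 1.0 and the MTU feature.
        let features = net.features();
        assert_ne!(features & (1u64 << VIRTIO_F_VERSION_1), 0);
        assert_ne!(features & (1u64 << VIRTIO_NET_F_MTU), 0);
    }
}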