1 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 // 3 // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. 4 // Use of this source code is governed by a BSD-style license that can be 5 // found in the LICENSE-BSD-3-Clause file. 6 // 7 // Copyright © 2020 Intel Corporation 8 // 9 // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause 10 11 use super::Error as DeviceError; 12 use super::{ 13 ActivateError, ActivateResult, EpollHelper, EpollHelperError, EpollHelperHandler, 14 RateLimiterConfig, VirtioCommon, VirtioDevice, VirtioDeviceType, VirtioInterruptType, 15 EPOLL_HELPER_EVENT_LAST, 16 }; 17 use crate::seccomp_filters::Thread; 18 use crate::thread_helper::spawn_virtio_thread; 19 use crate::GuestMemoryMmap; 20 use crate::VirtioInterrupt; 21 use block_util::{ 22 async_io::AsyncIo, async_io::AsyncIoError, async_io::DiskFile, build_disk_image_id, Request, 23 RequestType, VirtioBlockConfig, 24 }; 25 use rate_limiter::{RateLimiter, TokenType}; 26 use seccompiler::SeccompAction; 27 use std::io; 28 use std::num::Wrapping; 29 use std::os::unix::io::AsRawFd; 30 use std::path::PathBuf; 31 use std::result; 32 use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; 33 use std::sync::{Arc, Barrier}; 34 use std::{collections::HashMap, convert::TryInto}; 35 use versionize::{VersionMap, Versionize, VersionizeResult}; 36 use versionize_derive::Versionize; 37 use virtio_bindings::bindings::virtio_blk::*; 38 use virtio_queue::Queue; 39 use vm_memory::{ByteValued, Bytes, GuestAddressSpace, GuestMemoryAtomic}; 40 use vm_migration::VersionMapped; 41 use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable}; 42 use vm_virtio::AccessPlatform; 43 use vmm_sys_util::eventfd::EventFd; 44 45 const SECTOR_SHIFT: u8 = 9; 46 pub const SECTOR_SIZE: u64 = 0x01 << SECTOR_SHIFT; 47 48 // New descriptors are pending on the virtio queue. 49 const QUEUE_AVAIL_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1; 50 // New completed tasks are pending on the completion ring. 51 const COMPLETION_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 2; 52 // New 'wake up' event from the rate limiter 53 const RATE_LIMITER_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 3; 54 55 #[derive(Debug)] 56 pub enum Error { 57 /// Failed to parse the request. 58 RequestParsing(block_util::Error), 59 /// Failed to execute the request. 60 RequestExecuting(block_util::ExecuteError), 61 /// Failed to complete the request. 62 RequestCompleting(block_util::Error), 63 /// Missing the expected entry in the list of requests. 64 MissingEntryRequestList, 65 /// The asynchronous request returned with failure. 66 AsyncRequestFailure, 67 /// Failed synchronizing the file 68 Fsync(AsyncIoError), 69 /// Failed adding used index 70 QueueAddUsed(virtio_queue::Error), 71 /// Failed creating an iterator over the queue 72 QueueIterator(virtio_queue::Error), 73 } 74 75 pub type Result<T> = result::Result<T, Error>; 76 77 #[derive(Default, Clone)] 78 pub struct BlockCounters { 79 read_bytes: Arc<AtomicU64>, 80 read_ops: Arc<AtomicU64>, 81 write_bytes: Arc<AtomicU64>, 82 write_ops: Arc<AtomicU64>, 83 } 84 85 struct BlockEpollHandler { 86 queue_index: u16, 87 queue: Queue<GuestMemoryAtomic<GuestMemoryMmap>>, 88 mem: GuestMemoryAtomic<GuestMemoryMmap>, 89 disk_image: Box<dyn AsyncIo>, 90 disk_nsectors: u64, 91 interrupt_cb: Arc<dyn VirtioInterrupt>, 92 disk_image_id: Vec<u8>, 93 kill_evt: EventFd, 94 pause_evt: EventFd, 95 writeback: Arc<AtomicBool>, 96 counters: BlockCounters, 97 queue_evt: EventFd, 98 request_list: HashMap<u16, Request>, 99 rate_limiter: Option<RateLimiter>, 100 access_platform: Option<Arc<dyn AccessPlatform>>, 101 } 102 103 impl BlockEpollHandler { 104 fn process_queue_submit(&mut self) -> Result<bool> { 105 let queue = &mut self.queue; 106 107 let mut used_desc_heads = Vec::new(); 108 let mut used_count = 0; 109 110 let mut avail_iter = queue.iter().map_err(Error::QueueIterator)?; 111 for mut desc_chain in &mut avail_iter { 112 let mut request = Request::parse(&mut desc_chain, self.access_platform.as_ref()) 113 .map_err(Error::RequestParsing)?; 114 115 if let Some(rate_limiter) = &mut self.rate_limiter { 116 // If limiter.consume() fails it means there is no more TokenType::Ops 117 // budget and rate limiting is in effect. 118 if !rate_limiter.consume(1, TokenType::Ops) { 119 // Stop processing the queue and return this descriptor chain to the 120 // avail ring, for later processing. 121 avail_iter.go_to_previous_position(); 122 break; 123 } 124 // Exercise the rate limiter only if this request is of data transfer type. 125 if request.request_type == RequestType::In 126 || request.request_type == RequestType::Out 127 { 128 let mut bytes = Wrapping(0); 129 for (_, data_len) in &request.data_descriptors { 130 bytes += Wrapping(*data_len as u64); 131 } 132 133 // If limiter.consume() fails it means there is no more TokenType::Bytes 134 // budget and rate limiting is in effect. 135 if !rate_limiter.consume(bytes.0, TokenType::Bytes) { 136 // Revert the OPS consume(). 137 rate_limiter.manual_replenish(1, TokenType::Ops); 138 // Stop processing the queue and return this descriptor chain to the 139 // avail ring, for later processing. 140 avail_iter.go_to_previous_position(); 141 break; 142 } 143 }; 144 } 145 146 request.set_writeback(self.writeback.load(Ordering::Acquire)); 147 148 if request 149 .execute_async( 150 desc_chain.memory(), 151 self.disk_nsectors, 152 self.disk_image.as_mut(), 153 &self.disk_image_id, 154 desc_chain.head_index() as u64, 155 ) 156 .map_err(Error::RequestExecuting)? 157 { 158 self.request_list.insert(desc_chain.head_index(), request); 159 } else { 160 // We use unwrap because the request parsing process already 161 // checked that the status_addr was valid. 162 desc_chain 163 .memory() 164 .write_obj(VIRTIO_BLK_S_OK, request.status_addr) 165 .unwrap(); 166 167 // If no asynchronous operation has been submitted, we can 168 // simply return the used descriptor. 169 used_desc_heads.push((desc_chain.head_index(), 0)); 170 used_count += 1; 171 } 172 } 173 174 for &(desc_index, len) in used_desc_heads.iter() { 175 queue 176 .add_used(desc_index, len) 177 .map_err(Error::QueueAddUsed)?; 178 } 179 180 Ok(used_count > 0) 181 } 182 183 fn process_queue_complete(&mut self) -> Result<bool> { 184 let queue = &mut self.queue; 185 186 let mut used_desc_heads = Vec::new(); 187 let mut used_count = 0; 188 let mem = self.mem.memory(); 189 let mut read_bytes = Wrapping(0); 190 let mut write_bytes = Wrapping(0); 191 let mut read_ops = Wrapping(0); 192 let mut write_ops = Wrapping(0); 193 194 let completion_list = self.disk_image.complete(); 195 for (user_data, result) in completion_list { 196 let desc_index = user_data as u16; 197 let mut request = self 198 .request_list 199 .remove(&desc_index) 200 .ok_or(Error::MissingEntryRequestList)?; 201 request.complete_async().map_err(Error::RequestCompleting)?; 202 203 let (status, len) = if result >= 0 { 204 match request.request_type { 205 RequestType::In => { 206 for (_, data_len) in &request.data_descriptors { 207 read_bytes += Wrapping(*data_len as u64); 208 } 209 read_ops += Wrapping(1); 210 } 211 RequestType::Out => { 212 if !request.writeback { 213 self.disk_image.fsync(None).map_err(Error::Fsync)?; 214 } 215 for (_, data_len) in &request.data_descriptors { 216 write_bytes += Wrapping(*data_len as u64); 217 } 218 write_ops += Wrapping(1); 219 } 220 _ => {} 221 } 222 223 (VIRTIO_BLK_S_OK, result as u32) 224 } else { 225 error!( 226 "Request failed: {:?}", 227 io::Error::from_raw_os_error(-result) 228 ); 229 return Err(Error::AsyncRequestFailure); 230 }; 231 232 // We use unwrap because the request parsing process already 233 // checked that the status_addr was valid. 234 mem.write_obj(status, request.status_addr).unwrap(); 235 236 used_desc_heads.push((desc_index as u16, len)); 237 used_count += 1; 238 } 239 240 for &(desc_index, len) in used_desc_heads.iter() { 241 queue 242 .add_used(desc_index, len) 243 .map_err(Error::QueueAddUsed)?; 244 } 245 246 self.counters 247 .write_bytes 248 .fetch_add(write_bytes.0, Ordering::AcqRel); 249 self.counters 250 .write_ops 251 .fetch_add(write_ops.0, Ordering::AcqRel); 252 253 self.counters 254 .read_bytes 255 .fetch_add(read_bytes.0, Ordering::AcqRel); 256 self.counters 257 .read_ops 258 .fetch_add(read_ops.0, Ordering::AcqRel); 259 260 Ok(used_count > 0) 261 } 262 263 fn signal_used_queue(&self) -> result::Result<(), DeviceError> { 264 self.interrupt_cb 265 .trigger(VirtioInterruptType::Queue(self.queue_index)) 266 .map_err(|e| { 267 error!("Failed to signal used queue: {:?}", e); 268 DeviceError::FailedSignalingUsedQueue(e) 269 }) 270 } 271 272 fn run( 273 &mut self, 274 paused: Arc<AtomicBool>, 275 paused_sync: Arc<Barrier>, 276 ) -> result::Result<(), EpollHelperError> { 277 let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?; 278 helper.add_event(self.queue_evt.as_raw_fd(), QUEUE_AVAIL_EVENT)?; 279 helper.add_event(self.disk_image.notifier().as_raw_fd(), COMPLETION_EVENT)?; 280 if let Some(rate_limiter) = &self.rate_limiter { 281 helper.add_event(rate_limiter.as_raw_fd(), RATE_LIMITER_EVENT)?; 282 } 283 helper.run(paused, paused_sync, self)?; 284 285 Ok(()) 286 } 287 } 288 289 impl EpollHelperHandler for BlockEpollHandler { 290 fn handle_event(&mut self, _helper: &mut EpollHelper, event: &epoll::Event) -> bool { 291 let ev_type = event.data as u16; 292 match ev_type { 293 QUEUE_AVAIL_EVENT => { 294 if let Err(e) = self.queue_evt.read() { 295 error!("Failed to get queue event: {:?}", e); 296 return true; 297 } 298 299 let rate_limit_reached = 300 self.rate_limiter.as_ref().map_or(false, |r| r.is_blocked()); 301 302 // Process the queue only when the rate limit is not reached 303 if !rate_limit_reached { 304 match self.process_queue_submit() { 305 Ok(needs_notification) => { 306 if needs_notification { 307 if let Err(e) = self.signal_used_queue() { 308 error!("Failed to signal used queue: {:?}", e); 309 return true; 310 } 311 } 312 } 313 Err(e) => { 314 error!("Failed to process queue (submit): {:?}", e); 315 return true; 316 } 317 } 318 } 319 } 320 COMPLETION_EVENT => { 321 if let Err(e) = self.disk_image.notifier().read() { 322 error!("Failed to get queue event: {:?}", e); 323 return true; 324 } 325 326 match self.process_queue_complete() { 327 Ok(needs_notification) => { 328 if needs_notification { 329 if let Err(e) = self.signal_used_queue() { 330 error!("Failed to signal used queue: {:?}", e); 331 return true; 332 } 333 } 334 } 335 Err(e) => { 336 error!("Failed to process queue (complete): {:?}", e); 337 return true; 338 } 339 } 340 } 341 RATE_LIMITER_EVENT => { 342 if let Some(rate_limiter) = &mut self.rate_limiter { 343 // Upon rate limiter event, call the rate limiter handler 344 // and restart processing the queue. 345 if rate_limiter.event_handler().is_ok() { 346 match self.process_queue_submit() { 347 Ok(needs_notification) => { 348 if needs_notification { 349 if let Err(e) = self.signal_used_queue() { 350 error!("Failed to signal used queue: {:?}", e); 351 return true; 352 } 353 } 354 } 355 Err(e) => { 356 error!("Failed to process queue (submit): {:?}", e); 357 return true; 358 } 359 } 360 } 361 } else { 362 error!("Unexpected 'RATE_LIMITER_EVENT' when rate_limiter is not enabled."); 363 return true; 364 } 365 } 366 _ => { 367 error!("Unexpected event: {}", ev_type); 368 return true; 369 } 370 } 371 false 372 } 373 } 374 375 /// Virtio device for exposing block level read/write operations on a host file. 376 pub struct Block { 377 common: VirtioCommon, 378 id: String, 379 disk_image: Box<dyn DiskFile>, 380 disk_path: PathBuf, 381 disk_nsectors: u64, 382 config: VirtioBlockConfig, 383 writeback: Arc<AtomicBool>, 384 counters: BlockCounters, 385 seccomp_action: SeccompAction, 386 rate_limiter_config: Option<RateLimiterConfig>, 387 exit_evt: EventFd, 388 } 389 390 #[derive(Versionize)] 391 pub struct BlockState { 392 pub disk_path: String, 393 pub disk_nsectors: u64, 394 pub avail_features: u64, 395 pub acked_features: u64, 396 pub config: VirtioBlockConfig, 397 } 398 399 impl VersionMapped for BlockState {} 400 401 impl Block { 402 /// Create a new virtio block device that operates on the given file. 403 #[allow(clippy::too_many_arguments)] 404 pub fn new( 405 id: String, 406 mut disk_image: Box<dyn DiskFile>, 407 disk_path: PathBuf, 408 is_disk_read_only: bool, 409 iommu: bool, 410 num_queues: usize, 411 queue_size: u16, 412 seccomp_action: SeccompAction, 413 rate_limiter_config: Option<RateLimiterConfig>, 414 exit_evt: EventFd, 415 ) -> io::Result<Self> { 416 let disk_size = disk_image.size().map_err(|e| { 417 io::Error::new( 418 io::ErrorKind::Other, 419 format!("Failed getting disk size: {}", e), 420 ) 421 })?; 422 if disk_size % SECTOR_SIZE != 0 { 423 warn!( 424 "Disk size {} is not a multiple of sector size {}; \ 425 the remainder will not be visible to the guest.", 426 disk_size, SECTOR_SIZE 427 ); 428 } 429 430 let mut avail_features = (1u64 << VIRTIO_F_VERSION_1) 431 | (1u64 << VIRTIO_BLK_F_FLUSH) 432 | (1u64 << VIRTIO_BLK_F_CONFIG_WCE) 433 | (1u64 << VIRTIO_BLK_F_BLK_SIZE) 434 | (1u64 << VIRTIO_BLK_F_TOPOLOGY); 435 436 if iommu { 437 avail_features |= 1u64 << VIRTIO_F_IOMMU_PLATFORM; 438 } 439 440 if is_disk_read_only { 441 avail_features |= 1u64 << VIRTIO_BLK_F_RO; 442 } 443 444 let topology = disk_image.topology(); 445 info!("Disk topology: {:?}", topology); 446 447 let logical_block_size = if topology.logical_block_size > 512 { 448 topology.logical_block_size 449 } else { 450 512 451 }; 452 453 // Calculate the exponent that maps physical block to logical block 454 let mut physical_block_exp = 0; 455 let mut size = logical_block_size; 456 while size < topology.physical_block_size { 457 physical_block_exp += 1; 458 size <<= 1; 459 } 460 461 let disk_nsectors = disk_size / SECTOR_SIZE; 462 let mut config = VirtioBlockConfig { 463 capacity: disk_nsectors, 464 writeback: 1, 465 blk_size: topology.logical_block_size as u32, 466 physical_block_exp, 467 min_io_size: (topology.minimum_io_size / logical_block_size) as u16, 468 opt_io_size: (topology.optimal_io_size / logical_block_size) as u32, 469 ..Default::default() 470 }; 471 472 if num_queues > 1 { 473 avail_features |= 1u64 << VIRTIO_BLK_F_MQ; 474 config.num_queues = num_queues as u16; 475 } 476 477 Ok(Block { 478 common: VirtioCommon { 479 device_type: VirtioDeviceType::Block as u32, 480 avail_features, 481 paused_sync: Some(Arc::new(Barrier::new(num_queues + 1))), 482 queue_sizes: vec![queue_size; num_queues], 483 min_queues: 1, 484 ..Default::default() 485 }, 486 id, 487 disk_image, 488 disk_path, 489 disk_nsectors, 490 config, 491 writeback: Arc::new(AtomicBool::new(true)), 492 counters: BlockCounters::default(), 493 seccomp_action, 494 rate_limiter_config, 495 exit_evt, 496 }) 497 } 498 499 fn state(&self) -> BlockState { 500 BlockState { 501 disk_path: self.disk_path.to_str().unwrap().to_owned(), 502 disk_nsectors: self.disk_nsectors, 503 avail_features: self.common.avail_features, 504 acked_features: self.common.acked_features, 505 config: self.config, 506 } 507 } 508 509 fn set_state(&mut self, state: &BlockState) { 510 self.disk_path = state.disk_path.clone().into(); 511 self.disk_nsectors = state.disk_nsectors; 512 self.common.avail_features = state.avail_features; 513 self.common.acked_features = state.acked_features; 514 self.config = state.config; 515 } 516 517 fn update_writeback(&mut self) { 518 // Use writeback from config if VIRTIO_BLK_F_CONFIG_WCE 519 let writeback = if self.common.feature_acked(VIRTIO_BLK_F_CONFIG_WCE.into()) { 520 self.config.writeback == 1 521 } else { 522 // Else check if VIRTIO_BLK_F_FLUSH negotiated 523 self.common.feature_acked(VIRTIO_BLK_F_FLUSH.into()) 524 }; 525 526 info!( 527 "Changing cache mode to {}", 528 if writeback { 529 "writeback" 530 } else { 531 "writethrough" 532 } 533 ); 534 self.writeback.store(writeback, Ordering::Release); 535 } 536 } 537 538 impl Drop for Block { 539 fn drop(&mut self) { 540 if let Some(kill_evt) = self.common.kill_evt.take() { 541 // Ignore the result because there is nothing we can do about it. 542 let _ = kill_evt.write(1); 543 } 544 } 545 } 546 547 impl VirtioDevice for Block { 548 fn device_type(&self) -> u32 { 549 self.common.device_type 550 } 551 552 fn queue_max_sizes(&self) -> &[u16] { 553 &self.common.queue_sizes 554 } 555 556 fn features(&self) -> u64 { 557 self.common.avail_features 558 } 559 560 fn ack_features(&mut self, value: u64) { 561 self.common.ack_features(value) 562 } 563 564 fn read_config(&self, offset: u64, data: &mut [u8]) { 565 self.read_config_from_slice(self.config.as_slice(), offset, data); 566 } 567 568 fn write_config(&mut self, offset: u64, data: &[u8]) { 569 // The "writeback" field is the only mutable field 570 let writeback_offset = 571 (&self.config.writeback as *const _ as u64) - (&self.config as *const _ as u64); 572 if offset != writeback_offset || data.len() != std::mem::size_of_val(&self.config.writeback) 573 { 574 error!( 575 "Attempt to write to read-only field: offset {:x} length {}", 576 offset, 577 data.len() 578 ); 579 return; 580 } 581 582 self.config.writeback = data[0]; 583 self.update_writeback(); 584 } 585 586 fn activate( 587 &mut self, 588 mem: GuestMemoryAtomic<GuestMemoryMmap>, 589 interrupt_cb: Arc<dyn VirtioInterrupt>, 590 mut queues: Vec<Queue<GuestMemoryAtomic<GuestMemoryMmap>>>, 591 mut queue_evts: Vec<EventFd>, 592 ) -> ActivateResult { 593 self.common.activate(&queues, &queue_evts, &interrupt_cb)?; 594 595 let disk_image_id = build_disk_image_id(&self.disk_path); 596 self.update_writeback(); 597 598 let mut epoll_threads = Vec::new(); 599 for i in 0..queues.len() { 600 let queue_evt = queue_evts.remove(0); 601 let queue = queues.remove(0); 602 let queue_size = queue.state.size; 603 let (kill_evt, pause_evt) = self.common.dup_eventfds(); 604 605 let rate_limiter: Option<RateLimiter> = self 606 .rate_limiter_config 607 .map(RateLimiterConfig::try_into) 608 .transpose() 609 .map_err(ActivateError::CreateRateLimiter)?; 610 611 let mut handler = BlockEpollHandler { 612 queue_index: i as u16, 613 queue, 614 mem: mem.clone(), 615 disk_image: self 616 .disk_image 617 .new_async_io(queue_size as u32) 618 .map_err(|e| { 619 error!("failed to create new AsyncIo: {}", e); 620 ActivateError::BadActivate 621 })?, 622 disk_nsectors: self.disk_nsectors, 623 interrupt_cb: interrupt_cb.clone(), 624 disk_image_id: disk_image_id.clone(), 625 kill_evt, 626 pause_evt, 627 writeback: self.writeback.clone(), 628 counters: self.counters.clone(), 629 queue_evt, 630 request_list: HashMap::with_capacity(queue_size.into()), 631 rate_limiter, 632 access_platform: self.common.access_platform.clone(), 633 }; 634 635 let paused = self.common.paused.clone(); 636 let paused_sync = self.common.paused_sync.clone(); 637 638 spawn_virtio_thread( 639 &format!("{}_q{}", self.id.clone(), i), 640 &self.seccomp_action, 641 Thread::VirtioBlock, 642 &mut epoll_threads, 643 &self.exit_evt, 644 move || { 645 if let Err(e) = handler.run(paused, paused_sync.unwrap()) { 646 error!("Error running worker: {:?}", e); 647 } 648 }, 649 )?; 650 } 651 652 self.common.epoll_threads = Some(epoll_threads); 653 event!("virtio-device", "activated", "id", &self.id); 654 655 Ok(()) 656 } 657 658 fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> { 659 let result = self.common.reset(); 660 event!("virtio-device", "reset", "id", &self.id); 661 result 662 } 663 664 fn counters(&self) -> Option<HashMap<&'static str, Wrapping<u64>>> { 665 let mut counters = HashMap::new(); 666 667 counters.insert( 668 "read_bytes", 669 Wrapping(self.counters.read_bytes.load(Ordering::Acquire)), 670 ); 671 counters.insert( 672 "write_bytes", 673 Wrapping(self.counters.write_bytes.load(Ordering::Acquire)), 674 ); 675 counters.insert( 676 "read_ops", 677 Wrapping(self.counters.read_ops.load(Ordering::Acquire)), 678 ); 679 counters.insert( 680 "write_ops", 681 Wrapping(self.counters.write_ops.load(Ordering::Acquire)), 682 ); 683 684 Some(counters) 685 } 686 687 fn set_access_platform(&mut self, access_platform: Arc<dyn AccessPlatform>) { 688 self.common.set_access_platform(access_platform) 689 } 690 } 691 692 impl Pausable for Block { 693 fn pause(&mut self) -> result::Result<(), MigratableError> { 694 self.common.pause() 695 } 696 697 fn resume(&mut self) -> result::Result<(), MigratableError> { 698 self.common.resume() 699 } 700 } 701 702 impl Snapshottable for Block { 703 fn id(&self) -> String { 704 self.id.clone() 705 } 706 707 fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> { 708 Snapshot::new_from_versioned_state(&self.id(), &self.state()) 709 } 710 711 fn restore(&mut self, snapshot: Snapshot) -> std::result::Result<(), MigratableError> { 712 self.set_state(&snapshot.to_versioned_state(&self.id)?); 713 Ok(()) 714 } 715 } 716 impl Transportable for Block {} 717 impl Migratable for Block {} 718