#include "kvm/virtio-pci-dev.h"
#include "kvm/virtio-net.h"
#include "kvm/virtio.h"
#include "kvm/types.h"
#include "kvm/mutex.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/irq.h"
#include "kvm/uip.h"
#include "kvm/guest_compat.h"
#include "kvm/iovec.h"

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/if_tun.h>
#include <linux/types.h>

#include <arpa/inet.h>
#include <net/if.h>

#include <unistd.h>
#include <fcntl.h>

#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/eventfd.h>

#define VIRTIO_NET_QUEUE_SIZE	256
#define VIRTIO_NET_NUM_QUEUES	8

struct net_dev;

struct net_dev_operations {
	int (*rx)(struct iovec *iov, u16 in, struct net_dev *ndev);
	int (*tx)(struct iovec *iov, u16 in, struct net_dev *ndev);
};

struct net_dev {
	struct mutex mutex;
	struct virtio_device vdev;
	struct list_head list;

	struct virt_queue vqs[VIRTIO_NET_NUM_QUEUES * 2 + 1];
	struct virtio_net_config config;
	u32 features, rx_vqs, tx_vqs, queue_pairs;

	pthread_t io_thread[VIRTIO_NET_NUM_QUEUES * 2 + 1];
	struct mutex io_lock[VIRTIO_NET_NUM_QUEUES * 2 + 1];
	pthread_cond_t io_cond[VIRTIO_NET_NUM_QUEUES * 2 + 1];

	int vhost_fd;
	int tap_fd;
	char tap_name[IFNAMSIZ];

	int mode;

	struct uip_info info;
	struct net_dev_operations *ops;
	struct kvm *kvm;

	struct virtio_net_params *params;
};

static LIST_HEAD(ndevs);
static int compat_id = -1;

#define MAX_PACKET_SIZE 65550

static bool has_virtio_feature(struct net_dev *ndev, u32 feature)
{
	return ndev->features & (1 << feature);
}

static void *virtio_net_rx_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	struct virt_queue *vq;
	struct kvm *kvm;
	struct net_dev *ndev = p;
	u16 out, in;
	u16 head;
	int len, copied;
	u32 id;

	mutex_lock(&ndev->mutex);
	/* rx queues sit at the even indices of vqs[] */
	id = ndev->rx_vqs++ * 2;
	mutex_unlock(&ndev->mutex);

	kvm__set_thread_name("virtio-net-rx");

	kvm = ndev->kvm;
	vq = &ndev->vqs[id];

	while (1) {
		mutex_lock(&ndev->io_lock[id]);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&ndev->io_cond[id], &ndev->io_lock[id].mutex);
		mutex_unlock(&ndev->io_lock[id]);

		while (virt_queue__available(vq)) {
			unsigned char buffer[MAX_PACKET_SIZE + sizeof(struct virtio_net_hdr_mrg_rxbuf)];
			struct iovec dummy_iov = {
				.iov_base = buffer,
				.iov_len = sizeof(buffer),
			};
			struct virtio_net_hdr_mrg_rxbuf *hdr;

			len = ndev->ops->rx(&dummy_iov, 1, ndev);
			if (len < 0) {
				pr_warning("%s: rx on vq %u failed (%d), exiting thread\n",
					   __func__, id, len);
				goto out_err;
			}

			/*
			 * Copy the packet into the guest's rx buffers, chaining
			 * further descriptor chains if it doesn't fit into one.
			 */
			copied = 0;
			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
			hdr = (void *)iov[0].iov_base;
			while (copied < len) {
				size_t iovsize = min_t(size_t, len - copied, iov_size(iov, in));

				memcpy_toiovec(iov, buffer + copied, iovsize);
				copied += iovsize;
				if (has_virtio_feature(ndev, VIRTIO_NET_F_MRG_RXBUF))
					hdr->num_buffers++;
				virt_queue__set_used_elem(vq, head, iovsize);
				if (copied == len)
					break;
				while (!virt_queue__available(vq))
					sleep(0);
				head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
			}
			/* We should interrupt the guest right now, otherwise latency is huge. */
			if (virtio_queue__should_signal(vq))
				ndev->vdev.ops->signal_vq(kvm, &ndev->vdev, id);
		}
	}

out_err:
	pthread_exit(NULL);
	return NULL;
}

static void *virtio_net_tx_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	struct virt_queue *vq;
	struct kvm *kvm;
	struct net_dev *ndev = p;
	u16 out, in;
	u16 head;
	int len;
	u32 id;

	mutex_lock(&ndev->mutex);
	/* tx queues sit at the odd indices of vqs[] */
	id = ndev->tx_vqs++ * 2 + 1;
	mutex_unlock(&ndev->mutex);

	kvm__set_thread_name("virtio-net-tx");

	kvm = ndev->kvm;
	vq = &ndev->vqs[id];

	while (1) {
		mutex_lock(&ndev->io_lock[id]);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&ndev->io_cond[id], &ndev->io_lock[id].mutex);
		mutex_unlock(&ndev->io_lock[id]);

		while (virt_queue__available(vq)) {
			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
			len = ndev->ops->tx(iov, out, ndev);
			if (len < 0) {
				pr_warning("%s: tx on vq %u failed (%d)\n",
					   __func__, id, len);
				goto out_err;
			}

			virt_queue__set_used_elem(vq, head, len);
		}

		if (virtio_queue__should_signal(vq))
			ndev->vdev.ops->signal_vq(kvm, &ndev->vdev, id);
	}

out_err:
	pthread_exit(NULL);
	return NULL;
}

static virtio_net_ctrl_ack virtio_net_handle_mq(struct kvm *kvm, struct net_dev *ndev,
						struct virtio_net_ctrl_hdr *ctrl)
{
	/* Not much to do here */
	return VIRTIO_NET_OK;
}

static void *virtio_net_ctrl_thread(void *p)
{
	struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
	u16 out, in, head;
	struct net_dev *ndev = p;
	struct kvm *kvm = ndev->kvm;
	u32 id = ndev->queue_pairs * 2;
	struct virt_queue *vq = &ndev->vqs[id];
	struct virtio_net_ctrl_hdr *ctrl;
	virtio_net_ctrl_ack *ack;

	while (1) {
		mutex_lock(&ndev->io_lock[id]);
		if (!virt_queue__available(vq))
			pthread_cond_wait(&ndev->io_cond[id], &ndev->io_lock[id].mutex);
		mutex_unlock(&ndev->io_lock[id]);

		while (virt_queue__available(vq)) {
			head = virt_queue__get_iov(&ndev->vqs[id], iov, &out, &in, kvm);
			ctrl = iov[0].iov_base;
			ack = iov[out].iov_base;

			switch (ctrl->class) {
			case VIRTIO_NET_CTRL_MQ:
				*ack = virtio_net_handle_mq(kvm, ndev, ctrl);
				break;
			default:
				*ack = VIRTIO_NET_ERR;
				break;
			}
			virt_queue__set_used_elem(&ndev->vqs[id], head, iov[out].iov_len);
		}

		if (virtio_queue__should_signal(&ndev->vqs[id]))
			ndev->vdev.ops->signal_vq(kvm, &ndev->vdev, id);
	}

	pthread_exit(NULL);

	return NULL;
}

static void virtio_net_handle_callback(struct kvm *kvm, struct net_dev *ndev, int queue)
{
	if ((u32)queue >= (ndev->queue_pairs * 2 + 1)) {
		pr_warning("Unknown queue index %u", queue);
		return;
	}

	mutex_lock(&ndev->io_lock[queue]);
	pthread_cond_signal(&ndev->io_cond[queue]);
	mutex_unlock(&ndev->io_lock[queue]);
}

static bool virtio_net__tap_init(struct net_dev *ndev)
{
	int sock = socket(AF_INET, SOCK_STREAM, 0);
	int pid, status, offload, hdr_len;
	struct sockaddr_in sin = {0};
	struct ifreq ifr;
	const struct virtio_net_params *params = ndev->params;
	bool skipconf = !!params->tapif;

	/* Did the user already give us the fd? */
	if (params->fd) {
		ndev->tap_fd = params->fd;
		return true;
	}

	ndev->tap_fd = open("/dev/net/tun", O_RDWR);
	if (ndev->tap_fd < 0) {
		pr_warning("Unable to open /dev/net/tun");
		goto fail;
	}

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	if (params->tapif)
		strncpy(ifr.ifr_name, params->tapif, sizeof(ifr.ifr_name));
	if (ioctl(ndev->tap_fd, TUNSETIFF, &ifr) < 0) {
		pr_warning("Config tap device error. Are you root?");
		goto fail;
	}

	strncpy(ndev->tap_name, ifr.ifr_name, sizeof(ndev->tap_name));

	if (ioctl(ndev->tap_fd, TUNSETNOCSUM, 1) < 0) {
		pr_warning("Config tap device TUNSETNOCSUM error");
		goto fail;
	}

	hdr_len = has_virtio_feature(ndev, VIRTIO_NET_F_MRG_RXBUF) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (ioctl(ndev->tap_fd, TUNSETVNETHDRSZ, &hdr_len) < 0)
		pr_warning("Config tap device TUNSETVNETHDRSZ error");

	offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_UFO;
	if (ioctl(ndev->tap_fd, TUNSETOFFLOAD, offload) < 0) {
		pr_warning("Config tap device TUNSETOFFLOAD error");
		goto fail;
	}

	if (strcmp(params->script, "none")) {
		pid = fork();
		if (pid == 0) {
			execl(params->script, params->script, ndev->tap_name, NULL);
			_exit(1);
		} else {
			waitpid(pid, &status, 0);
			if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
				pr_warning("Failed to set up tap device with %s", params->script);
				goto fail;
			}
		}
	} else if (!skipconf) {
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ndev->tap_name, sizeof(ndev->tap_name));
		sin.sin_addr.s_addr = inet_addr(params->host_ip);
		memcpy(&(ifr.ifr_addr), &sin, sizeof(ifr.ifr_addr));
		ifr.ifr_addr.sa_family = AF_INET;
		if (ioctl(sock, SIOCSIFADDR, &ifr) < 0) {
			pr_warning("Could not set ip address on tap device");
			goto fail;
		}
	}

	if (!skipconf) {
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ndev->tap_name, sizeof(ndev->tap_name));
		ioctl(sock, SIOCGIFFLAGS, &ifr);
		ifr.ifr_flags |= IFF_UP | IFF_RUNNING;
		if (ioctl(sock, SIOCSIFFLAGS, &ifr) < 0)
			pr_warning("Could not bring tap device up");
	}

	close(sock);

	return true;

fail:
	if (sock >= 0)
		close(sock);
	if (ndev->tap_fd >= 0)
		close(ndev->tap_fd);

	return false;
}

static inline int tap_ops_tx(struct iovec *iov, u16 out, struct net_dev *ndev)
{
	return writev(ndev->tap_fd, iov, out);
}

static inline int tap_ops_rx(struct iovec *iov, u16 in, struct net_dev *ndev)
{
	return readv(ndev->tap_fd, iov, in);
}

static inline int uip_ops_tx(struct iovec *iov, u16 out, struct net_dev *ndev)
{
	return uip_tx(iov, out, &ndev->info);
}

static inline int uip_ops_rx(struct iovec *iov, u16 in, struct net_dev *ndev)
{
	return uip_rx(iov, in, &ndev->info);
}

static struct net_dev_operations tap_ops = {
	.rx = tap_ops_rx,
	.tx = tap_ops_tx,
};

static struct net_dev_operations uip_ops = {
	.rx = uip_ops_rx,
	.tx = uip_ops_tx,
};

static u8 *get_config(struct kvm *kvm, void *dev)
{
	struct net_dev *ndev = dev;

	return ((u8 *)(&ndev->config));
}

static u32 get_host_features(struct kvm *kvm, void *dev)
{
	struct net_dev *ndev = dev;

	return 1UL << VIRTIO_NET_F_MAC
		| 1UL << VIRTIO_NET_F_CSUM
		| 1UL << VIRTIO_NET_F_HOST_UFO
		| 1UL << VIRTIO_NET_F_HOST_TSO4
		| 1UL << VIRTIO_NET_F_HOST_TSO6
		| 1UL << VIRTIO_NET_F_GUEST_UFO
		| 1UL << VIRTIO_NET_F_GUEST_TSO4
		| 1UL << VIRTIO_NET_F_GUEST_TSO6
		| 1UL << VIRTIO_RING_F_EVENT_IDX
		| 1UL << VIRTIO_RING_F_INDIRECT_DESC
		| 1UL << VIRTIO_NET_F_CTRL_VQ
		| 1UL << VIRTIO_NET_F_MRG_RXBUF
		| 1UL << (ndev->queue_pairs > 1 ? VIRTIO_NET_F_MQ : 0);
}

static int virtio_net__vhost_set_features(struct net_dev *ndev)
{
	u64 features = 1UL << VIRTIO_RING_F_EVENT_IDX;
	u64 vhost_features;

	if (ioctl(ndev->vhost_fd, VHOST_GET_FEATURES, &vhost_features) != 0)
		die_perror("VHOST_GET_FEATURES failed");

	/* make sure both sides support mergeable rx buffers */
	if (vhost_features & 1UL << VIRTIO_NET_F_MRG_RXBUF &&
	    has_virtio_feature(ndev, VIRTIO_NET_F_MRG_RXBUF))
		features |= 1UL << VIRTIO_NET_F_MRG_RXBUF;

	return ioctl(ndev->vhost_fd, VHOST_SET_FEATURES, &features);
}

static void set_guest_features(struct kvm *kvm, void *dev, u32 features)
{
	struct net_dev *ndev = dev;

	ndev->features = features;

	if (ndev->mode == NET_MODE_TAP) {
		if (!virtio_net__tap_init(ndev))
			die_perror("You have requested a TAP device, but creation of one has failed because");
		if (ndev->vhost_fd &&
		    virtio_net__vhost_set_features(ndev) != 0)
			die_perror("VHOST_SET_FEATURES failed");
	} else {
		ndev->info.vnet_hdr_len = has_virtio_feature(ndev, VIRTIO_NET_F_MRG_RXBUF) ?
						sizeof(struct virtio_net_hdr_mrg_rxbuf) :
						sizeof(struct virtio_net_hdr);
		uip_init(&ndev->info);
	}
}

static bool is_ctrl_vq(struct net_dev *ndev, u32 vq)
{
	return vq == (u32)(ndev->queue_pairs * 2);
}

static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 page_size, u32 align,
		   u32 pfn)
{
	struct vhost_vring_state state = { .index = vq };
	struct vhost_vring_addr addr;
	struct net_dev *ndev = dev;
	struct virt_queue *queue;
	void *p;
	int r;

	compat__remove_message(compat_id);

	queue = &ndev->vqs[vq];
	queue->pfn = pfn;
	p = virtio_get_vq(kvm, queue->pfn, page_size);

	vring_init(&queue->vring, VIRTIO_NET_QUEUE_SIZE, p, align);

	mutex_init(&ndev->io_lock[vq]);
	pthread_cond_init(&ndev->io_cond[vq], NULL);
	if (is_ctrl_vq(ndev, vq)) {
		pthread_create(&ndev->io_thread[vq], NULL, virtio_net_ctrl_thread, ndev);

		return 0;
	} else if (ndev->vhost_fd == 0) {
		if (vq & 1)
			pthread_create(&ndev->io_thread[vq], NULL, virtio_net_tx_thread, ndev);
		else
			pthread_create(&ndev->io_thread[vq], NULL, virtio_net_rx_thread, ndev);

		return 0;
	}

	/*
	 * This queue is handled by vhost-net in the kernel: pass it the ring
	 * geometry and the userspace addresses of the vring parts.
	 */
	state.num = queue->vring.num;
	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_NUM, &state);
	if (r < 0)
		die_perror("VHOST_SET_VRING_NUM failed");
	state.num = 0;
	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_BASE, &state);
	if (r < 0)
		die_perror("VHOST_SET_VRING_BASE failed");

	addr = (struct vhost_vring_addr) {
		.index = vq,
		.desc_user_addr = (u64)(unsigned long)queue->vring.desc,
		.avail_user_addr = (u64)(unsigned long)queue->vring.avail,
		.used_user_addr = (u64)(unsigned long)queue->vring.used,
	};

	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_ADDR, &addr);
	if (r < 0)
		die_perror("VHOST_SET_VRING_ADDR failed");

	return 0;
}

static void notify_vq_gsi(struct kvm *kvm, void *dev, u32 vq, u32 gsi)
{
	struct net_dev *ndev = dev;
	struct kvm_irqfd irq;
	struct vhost_vring_file file;
	int r;

	if (ndev->vhost_fd == 0)
		return;

	irq = (struct kvm_irqfd) {
		.gsi = gsi,
		.fd = eventfd(0, 0),
	};
	file = (struct vhost_vring_file) {
		.index = vq,
		.fd = irq.fd,
	};

	r = ioctl(kvm->vm_fd, KVM_IRQFD, &irq);
	if (r < 0)
		die_perror("KVM_IRQFD failed");

	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_CALL, &file);
	if (r < 0)
		die_perror("VHOST_SET_VRING_CALL failed");
	file.fd = ndev->tap_fd;
	r = ioctl(ndev->vhost_fd, VHOST_NET_SET_BACKEND, &file);
	if (r != 0)
		die("VHOST_NET_SET_BACKEND failed %d", errno);
}

static void notify_vq_eventfd(struct kvm *kvm, void *dev, u32 vq, u32 efd)
{
	struct net_dev *ndev = dev;
	struct vhost_vring_file file = {
		.index = vq,
		.fd = efd,
	};
	int r;

	if (ndev->vhost_fd == 0 || is_ctrl_vq(ndev, vq))
		return;

	r = ioctl(ndev->vhost_fd, VHOST_SET_VRING_KICK, &file);
	if (r < 0)
		die_perror("VHOST_SET_VRING_KICK failed");
}

static int notify_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct net_dev *ndev = dev;

	virtio_net_handle_callback(kvm, ndev, vq);

	return 0;
}

static int get_pfn_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct net_dev *ndev = dev;

	return ndev->vqs[vq].pfn;
}

static int get_size_vq(struct kvm *kvm, void *dev, u32 vq)
{
	/* FIXME: dynamic */
	return VIRTIO_NET_QUEUE_SIZE;
}

static int set_size_vq(struct kvm *kvm, void *dev, u32 vq, int size)
{
	/* FIXME: dynamic */
	return size;
}

static struct virtio_ops net_dev_virtio_ops = (struct virtio_ops) {
	.get_config		= get_config,
	.get_host_features	= get_host_features,
	.set_guest_features	= set_guest_features,
	.init_vq		= init_vq,
	.get_pfn_vq		= get_pfn_vq,
	.get_size_vq		= get_size_vq,
	.set_size_vq		= set_size_vq,
	.notify_vq		= notify_vq,
	.notify_vq_gsi		= notify_vq_gsi,
	.notify_vq_eventfd	= notify_vq_eventfd,
};

static void virtio_net__vhost_init(struct kvm *kvm, struct net_dev *ndev)
{
	struct vhost_memory *mem;
	int r;

	ndev->vhost_fd = open("/dev/vhost-net", O_RDWR);
	if (ndev->vhost_fd < 0)
		die_perror("Failed opening vhost-net device");

	mem = calloc(1, sizeof(*mem) + sizeof(struct vhost_memory_region));
	if (mem == NULL)
		die("Failed allocating memory for vhost memory map");

	mem->nregions = 1;
	mem->regions[0] = (struct vhost_memory_region) {
		.guest_phys_addr = 0,
		.memory_size = kvm->ram_size,
		.userspace_addr = (unsigned long)kvm->ram_start,
	};

	r = ioctl(ndev->vhost_fd, VHOST_SET_OWNER);
	if (r != 0)
		die_perror("VHOST_SET_OWNER failed");

	r = ioctl(ndev->vhost_fd, VHOST_SET_MEM_TABLE, mem);
	if (r != 0)
		die_perror("VHOST_SET_MEM_TABLE failed");

	ndev->vdev.use_vhost = true;

	free(mem);
}

static inline void str_to_mac(const char *str, char *mac)
{
	sscanf(str, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
		mac, mac+1, mac+2, mac+3, mac+4, mac+5);
}

static int set_net_param(struct kvm *kvm, struct virtio_net_params *p,
			 const char *param, const char *val)
{
	if (strcmp(param, "guest_mac") == 0) {
		str_to_mac(val, p->guest_mac);
	} else if (strcmp(param, "mode") == 0) {
		if (!strncmp(val, "user", 4)) {
			int i;

			for (i = 0; i < kvm->cfg.num_net_devices; i++)
				if (kvm->cfg.net_params[i].mode == NET_MODE_USER)
					die("Only one usermode network device allowed at a time");
			p->mode = NET_MODE_USER;
		} else if (!strncmp(val, "tap", 3)) {
			p->mode = NET_MODE_TAP;
		} else if (!strncmp(val, "none", 4)) {
			kvm->cfg.no_net = 1;
			return -1;
		} else
			die("Unknown network mode %s, please use user, tap or none", kvm->cfg.network);
	} else if (strcmp(param, "script") == 0) {
		p->script = strdup(val);
	} else if (strcmp(param, "guest_ip") == 0) {
		p->guest_ip = strdup(val);
	} else if (strcmp(param, "host_ip") == 0) {
		p->host_ip = strdup(val);
	} else if (strcmp(param, "trans") == 0) {
		p->trans = strdup(val);
	} else if (strcmp(param, "tapif") == 0) {
		p->tapif = strdup(val);
	} else if (strcmp(param, "vhost") == 0) {
		p->vhost = atoi(val);
	} else if (strcmp(param, "fd") == 0) {
		p->fd = atoi(val);
	} else if (strcmp(param, "mq") == 0) {
		p->mq = atoi(val);
	} else
		die("Unknown network parameter %s", param);

	return 0;
}

int netdev_parser(const struct option *opt, const char *arg, int unset)
{
	struct virtio_net_params p;
	char *buf = NULL, *cmd = NULL, *cur = NULL;
	bool on_cmd = true;
	struct kvm *kvm = opt->ptr;

	if (arg) {
		buf = strdup(arg);
		if (buf == NULL)
			die("Failed allocating new net buffer");
		cur = strtok(buf, ",=");
	}

	p = (struct virtio_net_params) {
		.guest_ip	= DEFAULT_GUEST_ADDR,
		.host_ip	= DEFAULT_HOST_ADDR,
		.script		= DEFAULT_SCRIPT,
		.mode		= NET_MODE_TAP,
	};

	str_to_mac(DEFAULT_GUEST_MAC, p.guest_mac);
	p.guest_mac[5] += kvm->cfg.num_net_devices;

	while (cur) {
		if (on_cmd) {
			cmd = cur;
		} else {
			if (set_net_param(kvm, &p, cmd, cur) < 0)
				goto done;
		}
		on_cmd = !on_cmd;

		cur = strtok(NULL, ",=");
	}

	kvm->cfg.num_net_devices++;

	kvm->cfg.net_params = realloc(kvm->cfg.net_params,
				      kvm->cfg.num_net_devices * sizeof(*kvm->cfg.net_params));
	if (kvm->cfg.net_params == NULL)
		die("Failed adding new network device");

	kvm->cfg.net_params[kvm->cfg.num_net_devices - 1] = p;

done:
	free(buf);
	return 0;
}

static int virtio_net__init_one(struct virtio_net_params *params)
{
	int i, err;
	struct net_dev *ndev;
	struct virtio_ops *ops;

	ndev = calloc(1, sizeof(struct net_dev));
	if (ndev == NULL)
		return -ENOMEM;

	ops = malloc(sizeof(*ops));
	if (ops == NULL) {
		err = -ENOMEM;
		goto err_free_ndev;
	}

	list_add_tail(&ndev->list, &ndevs);

	ndev->kvm = params->kvm;
	ndev->params = params;

	mutex_init(&ndev->mutex);
	ndev->queue_pairs = max(1, min(VIRTIO_NET_NUM_QUEUES, params->mq));
	ndev->config.status = VIRTIO_NET_S_LINK_UP;
	if (ndev->queue_pairs > 1)
		ndev->config.max_virtqueue_pairs = ndev->queue_pairs;

	for (i = 0; i < 6; i++) {
		ndev->config.mac[i] = params->guest_mac[i];
		ndev->info.guest_mac.addr[i] = params->guest_mac[i];
		ndev->info.host_mac.addr[i] = params->host_mac[i];
	}

	ndev->mode = params->mode;
	if (ndev->mode == NET_MODE_TAP) {
		ndev->ops = &tap_ops;
	} else {
		ndev->info.host_ip = ntohl(inet_addr(params->host_ip));
		ndev->info.guest_ip = ntohl(inet_addr(params->guest_ip));
		ndev->info.guest_netmask = ntohl(inet_addr("255.255.255.0"));
		ndev->info.buf_nr = 20;
		ndev->ops = &uip_ops;
		uip_static_init(&ndev->info);
	}

	*ops = net_dev_virtio_ops;
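	/* Register the device with the guest: MMIO transport if requested, PCI otherwise. */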
	if (params->trans && strcmp(params->trans, "mmio") == 0)
		virtio_init(params->kvm, ndev, &ndev->vdev, ops, VIRTIO_MMIO,
			    PCI_DEVICE_ID_VIRTIO_NET, VIRTIO_ID_NET, PCI_CLASS_NET);
	else
		virtio_init(params->kvm, ndev, &ndev->vdev, ops, VIRTIO_PCI,
			    PCI_DEVICE_ID_VIRTIO_NET, VIRTIO_ID_NET, PCI_CLASS_NET);

	if (params->vhost)
		virtio_net__vhost_init(params->kvm, ndev);

	if (compat_id == -1)
		compat_id = virtio_compat_add_message("virtio-net", "CONFIG_VIRTIO_NET");

	return 0;

err_free_ndev:
	free(ndev);
	return err;
}

int virtio_net__init(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->cfg.num_net_devices; i++) {
		kvm->cfg.net_params[i].kvm = kvm;
		virtio_net__init_one(&kvm->cfg.net_params[i]);
	}

	if (kvm->cfg.num_net_devices == 0 && kvm->cfg.no_net == 0) {
		static struct virtio_net_params net_params;

		net_params = (struct virtio_net_params) {
			.guest_ip	= kvm->cfg.guest_ip,
			.host_ip	= kvm->cfg.host_ip,
			.kvm		= kvm,
			.script		= kvm->cfg.script,
			.mode		= NET_MODE_USER,
		};
		str_to_mac(kvm->cfg.guest_mac, net_params.guest_mac);
		str_to_mac(kvm->cfg.host_mac, net_params.host_mac);

		virtio_net__init_one(&net_params);
	}

	return 0;
}
virtio_dev_init(virtio_net__init);

int virtio_net__exit(struct kvm *kvm)
{
	return 0;
}
virtio_dev_exit(virtio_net__exit);