/*
 * Virtio random number generator (virtio-rng) device emulation: read
 * requests posted by the guest on the single virtqueue are served with
 * bytes from the host's /dev/urandom.
 */
#include "kvm/virtio-rng.h"

#include "kvm/virtio-pci-dev.h"

#include "kvm/disk-image.h"
#include "kvm/virtio.h"
#include "kvm/ioport.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/pci.h"
#include "kvm/threadpool.h"
#include "kvm/irq.h"
#include "kvm/ioeventfd.h"

#include <linux/virtio_ring.h>
#include <linux/virtio_rng.h>

#include <linux/list.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <pthread.h>

#define NUM_VIRT_QUEUES		1
#define VIRTIO_RNG_QUEUE_SIZE	128

struct rng_dev_job {
	struct virt_queue	*vq;
	struct rng_dev		*rdev;
	struct thread_pool__job	job_id;
};

struct rng_dev {
	struct pci_device_header pci_hdr;
	struct list_head	list;

	u16			base_addr;
	u8			status;
	u8			isr;
	u16			config_vector;
	int			fd;
	u32			vq_vector[NUM_VIRT_QUEUES];
	u32			msix_io_block;

	/* virtio queue */
	u16			queue_selector;
	struct virt_queue	vqs[NUM_VIRT_QUEUES];
	struct rng_dev_job	jobs[NUM_VIRT_QUEUES];
};

static LIST_HEAD(rdevs);

static bool virtio_rng_pci_io_in(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
	unsigned long offset;
	bool ret = true;
	struct rng_dev *rdev;

	rdev	= ioport->priv;
	offset	= port - rdev->base_addr;

	switch (offset) {
	case VIRTIO_PCI_HOST_FEATURES:
	case VIRTIO_PCI_GUEST_FEATURES:
	case VIRTIO_PCI_QUEUE_SEL:
	case VIRTIO_PCI_QUEUE_NOTIFY:
		ret = false;
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		ioport__write32(data, rdev->vqs[rdev->queue_selector].pfn);
		break;
	case VIRTIO_PCI_QUEUE_NUM:
		ioport__write16(data, VIRTIO_RNG_QUEUE_SIZE);
		break;
	case VIRTIO_PCI_STATUS:
		ioport__write8(data, rdev->status);
		break;
	case VIRTIO_PCI_ISR:
		ioport__write8(data, rdev->isr);
		kvm__irq_line(kvm, rdev->pci_hdr.irq_line, VIRTIO_IRQ_LOW);
		rdev->isr = VIRTIO_IRQ_LOW;
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		ioport__write16(data, rdev->config_vector);
		break;
	case VIRTIO_MSI_QUEUE_VECTOR:
		ioport__write16(data, rdev->vq_vector[rdev->queue_selector]);
		break;
	default:
		ret = false;
		break;
	};

	return ret;
}

/* Fill one request's descriptors with random bytes read from the host fd. */
static bool virtio_rng_do_io_request(struct kvm *kvm, struct rng_dev *rdev, struct virt_queue *queue)
{
	struct iovec iov[VIRTIO_RNG_QUEUE_SIZE];
	unsigned int len = 0;
	u16 out, in, head;

	head	= virt_queue__get_iov(queue, iov, &out, &in, kvm);
	len	= readv(rdev->fd, iov, in);

	virt_queue__set_used_elem(queue, head, len);

	return true;
}

/* Thread-pool job: drain the virtqueue, then notify the guest. */
static void virtio_rng_do_io(struct kvm *kvm, void *param)
{
	struct rng_dev_job *job	= param;
	struct virt_queue *vq	= job->vq;
	struct rng_dev *rdev	= job->rdev;

	while (virt_queue__available(vq))
		virtio_rng_do_io_request(kvm, rdev, vq);

	kvm__irq_line(kvm, rdev->pci_hdr.irq_line, VIRTIO_IRQ_HIGH);
}

static bool virtio_rng_pci_io_out(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
	unsigned long offset;
	bool ret = true;
	struct rng_dev *rdev;

	rdev	= ioport->priv;
	offset	= port - rdev->base_addr;

	switch (offset) {
	case VIRTIO_PCI_GUEST_FEATURES:
		break;
	case VIRTIO_PCI_QUEUE_PFN: {
		struct virt_queue *queue;
		struct rng_dev_job *job;
		void *p;

		queue		= &rdev->vqs[rdev->queue_selector];
		queue->pfn	= ioport__read32(data);
		p		= guest_pfn_to_host(kvm, queue->pfn);

		job = &rdev->jobs[rdev->queue_selector];

		vring_init(&queue->vring, VIRTIO_RNG_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);

		*job = (struct rng_dev_job) {
			.vq	= queue,
			.rdev	= rdev,
		};

		thread_pool__init_job(&job->job_id, kvm, virtio_rng_do_io, job);

		break;
	}
	case VIRTIO_PCI_QUEUE_SEL:
		rdev->queue_selector = ioport__read16(data);
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY: {
		u16 queue_index;
		queue_index = ioport__read16(data);
		thread_pool__do_job(&rdev->jobs[queue_index].job_id);
		break;
	}
	case VIRTIO_PCI_STATUS:
		rdev->status = ioport__read8(data);
		break;
	case VIRTIO_MSI_CONFIG_VECTOR:
		rdev->config_vector = ioport__read16(data);
		break;
	case VIRTIO_MSI_QUEUE_VECTOR: {
		u32 gsi;
		u32 vec;

		vec = rdev->vq_vector[rdev->queue_selector] = ioport__read16(data);

		gsi = irq__add_msix_route(kvm,
					  rdev->pci_hdr.msix.table[vec].low,
					  rdev->pci_hdr.msix.table[vec].high,
					  rdev->pci_hdr.msix.table[vec].data);
		rdev->pci_hdr.irq_line = gsi;
		break;
	}
	default:
		ret = false;
		break;
	};

	return ret;
}

static struct ioport_operations virtio_rng_io_ops = {
	.io_in	= virtio_rng_pci_io_in,
	.io_out	= virtio_rng_pci_io_out,
};

static void ioevent_callback(struct kvm *kvm, void *param)
{
	struct rng_dev_job *job = param;

	thread_pool__do_job(&job->job_id);
}

static void callback_mmio(u64 addr, u8 *data, u32 len, u8 is_write, void *ptr)
{
	struct rng_dev *rdev = ptr;
	void *table = &rdev->pci_hdr.msix.table;

	if (is_write)
		memcpy(table + addr - rdev->msix_io_block, data, len);
	else
		memcpy(data, table + addr - rdev->msix_io_block, len);
}

void virtio_rng__init(struct kvm *kvm)
{
	u8 pin, line, dev, i;
	u16 rdev_base_addr;
	struct rng_dev *rdev;
	struct ioevent ioevent;

	rdev = malloc(sizeof(*rdev));
	if (rdev == NULL)
		return;

	rdev->msix_io_block = pci_get_io_space_block();

	rdev_base_addr = ioport__register(IOPORT_EMPTY, &virtio_rng_io_ops, IOPORT_SIZE, rdev);
	kvm__register_mmio(kvm, rdev->msix_io_block, 0x100, callback_mmio, rdev);

	rdev->pci_hdr = (struct pci_device_header) {
		.vendor_id		= PCI_VENDOR_ID_REDHAT_QUMRANET,
		.device_id		= PCI_DEVICE_ID_VIRTIO_RNG,
		.header_type		= PCI_HEADER_TYPE_NORMAL,
		.revision_id		= 0,
		.class			= 0x010000,
		.subsys_vendor_id	= PCI_SUBSYSTEM_VENDOR_ID_REDHAT_QUMRANET,
		.subsys_id		= VIRTIO_ID_RNG,
		.bar[0]			= rdev_base_addr | PCI_BASE_ADDRESS_SPACE_IO,
		.bar[1]			= rdev->msix_io_block |
					  PCI_BASE_ADDRESS_SPACE_MEMORY |
					  PCI_BASE_ADDRESS_MEM_TYPE_64,
		/* bar[2] is the continuation of bar[1] for 64bit addressing */
		.bar[2]			= 0,
		.status			= PCI_STATUS_CAP_LIST,
		.capabilities		= (void *)&rdev->pci_hdr.msix - (void *)&rdev->pci_hdr,
	};

	rdev->pci_hdr.msix.cap		= PCI_CAP_ID_MSIX;
	rdev->pci_hdr.msix.next		= 0;
	rdev->pci_hdr.msix.table_size	= (NUM_VIRT_QUEUES + 1) | PCI_MSIX_FLAGS_ENABLE;
	rdev->pci_hdr.msix.table_offset	= 1; /* Use BAR 1 */

	rdev->config_vector	= 0;
	rdev->base_addr		= rdev_base_addr;
	rdev->fd		= open("/dev/urandom", O_RDONLY);
	if (rdev->fd < 0)
		die("Failed initializing RNG");

	if (irq__register_device(VIRTIO_ID_RNG, &dev, &pin, &line) < 0)
		return;

	rdev->pci_hdr.irq_pin	= pin;
	rdev->pci_hdr.irq_line	= line;
	pci__register(&rdev->pci_hdr, dev);

	list_add_tail(&rdev->list, &rdevs);

	for (i = 0; i < NUM_VIRT_QUEUES; i++) {
		ioevent = (struct ioevent) {
			.io_addr	= rdev_base_addr + VIRTIO_PCI_QUEUE_NOTIFY,
			.io_len		= sizeof(u16),
			.fn		= ioevent_callback,
			.fn_ptr		= &rdev->jobs[i],
			.datamatch	= i,
			.fn_kvm		= kvm,
			.fd		= eventfd(0, 0),
		};

		ioeventfd__add_event(&ioevent);
	}
}

void virtio_rng__delete_all(struct kvm *kvm)
{
	while (!list_empty(&rdevs)) {
		struct rng_dev *rdev;

		rdev = list_first_entry(&rdevs, struct rng_dev, list);
		list_del(&rdev->list);
		ioeventfd__del_event(rdev->base_addr + VIRTIO_PCI_QUEUE_NOTIFY, 0);
		free(rdev);
	}
}
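
/*
 * Usage sketch (not part of the original file): a minimal caller, assuming
 * it already holds a fully initialized struct kvm.  The wrapper name
 * rng_example() is hypothetical; only virtio_rng__init() and
 * virtio_rng__delete_all() come from this file.
 */
#if 0
static void rng_example(struct kvm *kvm)
{
	/* Registers the PCI device, its I/O ports, the MSI-X BAR and ioeventfds. */
	virtio_rng__init(kvm);

	/* ... guest runs, reading entropy through the virtqueue ... */

	/* Drops the QUEUE_NOTIFY ioeventfds and frees every registered rng_dev. */
	virtio_rng__delete_all(kvm);
}
#endif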