#include "kvm/virtio-mmio.h"
#include "kvm/ioeventfd.h"
#include "kvm/ioport.h"
#include "kvm/virtio.h"
#include "kvm/kvm.h"
#include "kvm/irq.h"

#include <linux/virtio_mmio.h>
#include <string.h>

static u32 virtio_mmio_io_space_blocks = KVM_VIRTIO_MMIO_AREA;

/* Carve the next 'size' bytes out of the guest-physical virtio-mmio window. */
static u32 virtio_mmio_get_io_space_block(u32 size)
{
	u32 block = virtio_mmio_io_space_blocks;
	virtio_mmio_io_space_blocks += size;

	return block;
}

/* Invoked via ioeventfd when the guest writes VIRTIO_MMIO_QUEUE_NOTIFY. */
static void virtio_mmio_ioevent_callback(struct kvm *kvm, void *param)
{
	struct virtio_mmio_ioevent_param *ioeventfd = param;
	struct virtio_mmio *vmmio = ioeventfd->vdev->virtio;

	ioeventfd->vdev->ops->notify_vq(kvm, vmmio->dev, ioeventfd->vq);
}

static int virtio_mmio_init_ioeventfd(struct kvm *kvm,
				      struct virtio_device *vdev, u32 vq)
{
	struct virtio_mmio *vmmio = vdev->virtio;
	struct ioevent ioevent;
	int err;

	vmmio->ioeventfds[vq] = (struct virtio_mmio_ioevent_param) {
		.vdev		= vdev,
		.vq		= vq,
	};

	ioevent = (struct ioevent) {
		.io_addr	= vmmio->addr + VIRTIO_MMIO_QUEUE_NOTIFY,
		.io_len		= sizeof(u32),
		.fn		= virtio_mmio_ioevent_callback,
		.fn_ptr		= &vmmio->ioeventfds[vq],
		.datamatch	= vq,
		.fn_kvm		= kvm,
		.fd		= eventfd(0, 0),
	};

	err = ioeventfd__add_event(&ioevent, false);
	if (err)
		return err;

	if (vdev->ops->notify_vq_eventfd)
		vdev->ops->notify_vq_eventfd(kvm, vmmio->dev, vq, ioevent.fd);

	return 0;
}

int virtio_mmio_signal_vq(struct kvm *kvm, struct virtio_device *vdev, u32 vq)
{
	struct virtio_mmio *vmmio = vdev->virtio;

	vmmio->hdr.interrupt_state |= VIRTIO_MMIO_INT_VRING;
	kvm__irq_trigger(vmmio->kvm, vmmio->irq);

	return 0;
}

int virtio_mmio_signal_config(struct kvm *kvm, struct virtio_device *vdev)
{
	struct virtio_mmio *vmmio = vdev->virtio;

	vmmio->hdr.interrupt_state |= VIRTIO_MMIO_INT_CONFIG;
	kvm__irq_trigger(vmmio->kvm, vmmio->irq);

	return 0;
}

/* Byte-wise access to the device-specific configuration space. */
static void virtio_mmio_device_specific(u64 addr, u8 *data, u32 len,
					u8 is_write, struct virtio_device *vdev)
{
	struct virtio_mmio *vmmio = vdev->virtio;
	u32 i;

	for (i = 0; i < len; i++) {
		if (is_write)
			vdev->ops->set_config(vmmio->kvm, vmmio->dev,
					      data[i], addr + i);
		else
			data[i] = vdev->ops->get_config(vmmio->kvm,
							vmmio->dev, addr + i);
	}
}

/* Guest read of the common virtio-mmio register block. */
static void virtio_mmio_config_in(u64 addr, void *data, u32 len,
				  struct virtio_device *vdev)
{
	struct virtio_mmio *vmmio = vdev->virtio;
	u32 val = 0;

	switch (addr) {
	case VIRTIO_MMIO_MAGIC_VALUE:
	case VIRTIO_MMIO_VERSION:
	case VIRTIO_MMIO_DEVICE_ID:
	case VIRTIO_MMIO_VENDOR_ID:
	case VIRTIO_MMIO_STATUS:
	case VIRTIO_MMIO_INTERRUPT_STATUS:
		ioport__write32(data, *(u32 *)(((void *)&vmmio->hdr) + addr));
		break;
	case VIRTIO_MMIO_HOST_FEATURES:
		if (vmmio->hdr.host_features_sel == 0)
			val = vdev->ops->get_host_features(vmmio->kvm,
							   vmmio->dev);
		ioport__write32(data, val);
		break;
	case VIRTIO_MMIO_QUEUE_PFN:
		val = vdev->ops->get_pfn_vq(vmmio->kvm, vmmio->dev,
					    vmmio->hdr.queue_sel);
		ioport__write32(data, val);
		break;
	case VIRTIO_MMIO_QUEUE_NUM_MAX:
		val = vdev->ops->get_size_vq(vmmio->kvm, vmmio->dev,
					     vmmio->hdr.queue_sel);
		ioport__write32(data, val);
		break;
	default:
		break;
	}
}

/* Guest write to the common virtio-mmio register block. */
static void virtio_mmio_config_out(u64 addr, void *data, u32 len,
				   struct virtio_device *vdev)
{
	struct virtio_mmio *vmmio = vdev->virtio;
	u32 val = 0;

	switch (addr) {
	case VIRTIO_MMIO_HOST_FEATURES_SEL:
	case VIRTIO_MMIO_GUEST_FEATURES_SEL:
	case VIRTIO_MMIO_QUEUE_SEL:
	case VIRTIO_MMIO_STATUS:
		val = ioport__read32(data);
		*(u32 *)(((void *)&vmmio->hdr) + addr) = val;
		break;
	case VIRTIO_MMIO_GUEST_FEATURES:
		if (vmmio->hdr.guest_features_sel == 0) {
			val = ioport__read32(data);
			vdev->ops->set_guest_features(vmmio->kvm,
						      vmmio->dev, val);
		}
		break;
	case VIRTIO_MMIO_GUEST_PAGE_SIZE:
		val = ioport__read32(data);
		vmmio->hdr.guest_page_size = val;
		/* FIXME: set guest page size */
		break;
	case VIRTIO_MMIO_QUEUE_NUM:
		val = ioport__read32(data);
		vmmio->hdr.queue_num = val;
		/* FIXME: set vq size */
		vdev->ops->set_size_vq(vmmio->kvm, vmmio->dev,
				       vmmio->hdr.queue_sel, val);
		break;
	case VIRTIO_MMIO_QUEUE_ALIGN:
		val = ioport__read32(data);
		vmmio->hdr.queue_align = val;
		/* FIXME: set used ring alignment */
		break;
	case VIRTIO_MMIO_QUEUE_PFN:
		val = ioport__read32(data);
		virtio_mmio_init_ioeventfd(vmmio->kvm, vdev, vmmio->hdr.queue_sel);
		vdev->ops->init_vq(vmmio->kvm, vmmio->dev,
				   vmmio->hdr.queue_sel, val);
		break;
	case VIRTIO_MMIO_QUEUE_NOTIFY:
		val = ioport__read32(data);
		vdev->ops->notify_vq(vmmio->kvm, vmmio->dev, val);
		break;
	case VIRTIO_MMIO_INTERRUPT_ACK:
		val = ioport__read32(data);
		vmmio->hdr.interrupt_state &= ~val;
		break;
	default:
		break;
	}
}

/* Dispatch a guest MMIO access to the register block or the device config space. */
static void virtio_mmio_mmio_callback(u64 addr, u8 *data, u32 len,
				      u8 is_write, void *ptr)
{
	struct virtio_device *vdev = ptr;
	struct virtio_mmio *vmmio = vdev->virtio;
	u32 offset = addr - vmmio->addr;

	if (offset >= VIRTIO_MMIO_CONFIG) {
		offset -= VIRTIO_MMIO_CONFIG;
		virtio_mmio_device_specific(offset, data, len, is_write, ptr);
		return;
	}

	if (is_write)
		virtio_mmio_config_out(offset, data, len, ptr);
	else
		virtio_mmio_config_in(offset, data, len, ptr);
}

int virtio_mmio_init(struct kvm *kvm, void *dev, struct virtio_device *vdev,
		     int device_id, int subsys_id, int class)
{
	struct virtio_mmio *vmmio = vdev->virtio;
	u8 device, pin, line;

	vmmio->addr = virtio_mmio_get_io_space_block(VIRTIO_MMIO_IO_SIZE);
	vmmio->kvm = kvm;
	vmmio->dev = dev;

	kvm__register_mmio(kvm, vmmio->addr, VIRTIO_MMIO_IO_SIZE,
			   false, virtio_mmio_mmio_callback, vdev);

	vmmio->hdr = (struct virtio_mmio_hdr) {
		.magic		= {'v', 'i', 'r', 't'},
		.version	= 1,
		.device_id	= device_id - 0x1000 + 1,
		.vendor_id	= 0x4d564b4c, /* 'LKVM' */
		.queue_num_max	= 256,
	};

	if (irq__register_device(subsys_id, &device, &pin, &line) < 0)
		return -1;
	vmmio->irq = line;

	/*
	 * Instantiate guest virtio-mmio devices using kernel command line
	 * (or module) parameter, e.g.
	 *
	 * virtio_mmio.devices=0x200@0xd2000000:5,0x200@0xd2000200:6
	 */
	pr_info("virtio_mmio.devices=0x%x@0x%x:%d\n",
		VIRTIO_MMIO_IO_SIZE, vmmio->addr, line);

	return 0;
}

int virtio_mmio_exit(struct kvm *kvm, struct virtio_device *vdev)
{
	struct virtio_mmio *vmmio = vdev->virtio;
	int i;

	kvm__deregister_mmio(kvm, vmmio->addr);

	for (i = 0; i < VIRTIO_MMIO_MAX_VQ; i++)
		ioeventfd__del_event(vmmio->addr + VIRTIO_MMIO_QUEUE_NOTIFY, i);

	return 0;
}
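
/*
 * Usage sketch (illustrative only, not part of the original file): a device
 * backend that owns a struct virtio_device, with ->virtio pointing at a
 * struct virtio_mmio and ->ops filled in, is expected to register itself
 * through virtio_mmio_init() and tear down through virtio_mmio_exit().
 * "my_dev" and "my_vdev" below are placeholder names; the ID arguments are
 * whatever the backend normally passes for its device class.
 *
 *	if (virtio_mmio_init(kvm, my_dev, my_vdev,
 *			     device_id, subsys_id, class) < 0)
 *		return -1;
 *	...
 *	virtio_mmio_signal_vq(kvm, my_vdev, vq);   (after filling the used ring)
 *	...
 *	virtio_mmio_exit(kvm, my_vdev);            (on teardown)
 */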