xref: /kvmtool/virtio/console.c (revision 74af1456dfa0c3fb1c79529450c6130b54fd1c83)
1 #include "kvm/virtio-console.h"
2 #include "kvm/virtio-pci-dev.h"
3 #include "kvm/disk-image.h"
4 #include "kvm/virtio.h"
5 #include "kvm/ioport.h"
6 #include "kvm/util.h"
7 #include "kvm/term.h"
8 #include "kvm/mutex.h"
9 #include "kvm/kvm.h"
10 #include "kvm/pci.h"
11 #include "kvm/threadpool.h"
12 #include "kvm/irq.h"
13 #include "kvm/guest_compat.h"
14 
15 #include <linux/virtio_console.h>
16 #include <linux/virtio_ring.h>
17 #include <linux/virtio_blk.h>
18 
19 #include <sys/uio.h>
20 #include <sys/types.h>
21 #include <sys/stat.h>
22 #include <termios.h>
23 #include <unistd.h>
24 #include <fcntl.h>
25 
26 #define VIRTIO_CONSOLE_QUEUE_SIZE	128
27 #define VIRTIO_CONSOLE_NUM_QUEUES	2
28 #define VIRTIO_CONSOLE_RX_QUEUE		0
29 #define VIRTIO_CONSOLE_TX_QUEUE		1
30 
struct con_dev {
	/* Serializes RX injection against queue init/teardown (protects vq_ready). */
	struct mutex			mutex;

	struct virtio_device		vdev;
	struct virt_queue		vqs[VIRTIO_CONSOLE_NUM_QUEUES];
	struct virtio_console_config	config;
	/* Non-zero once the guest has set up the RX queue; gated under mutex. */
	int				vq_ready;

	/* One deferred job per queue: RX input injection and TX drain. */
	struct thread_pool__job		jobs[VIRTIO_CONSOLE_NUM_QUEUES];
};
41 
/* Single global console device instance (only hvc0 is supported). */
static struct con_dev cdev = {
	.mutex				= MUTEX_INITIALIZER,
	.vq_ready			= 0,
};

/* Handle for the "guest may lack CONFIG_VIRTIO_CONSOLE" compat warning; -1 until registered. */
static int compat_id = -1;
48 
49 /*
50  * Interrupts are injected for hvc0 only.
51  */
virtio_console__inject_interrupt_callback(struct kvm * kvm,void * param)52 static void virtio_console__inject_interrupt_callback(struct kvm *kvm, void *param)
53 {
54 	struct iovec iov[VIRTIO_CONSOLE_QUEUE_SIZE];
55 	struct virt_queue *vq;
56 	u16 out, in;
57 	u16 head;
58 	int len;
59 
60 	mutex_lock(&cdev.mutex);
61 
62 	vq = param;
63 
64 	if (term_readable(0) && virt_queue__available(vq)) {
65 		head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
66 		len = term_getc_iov(kvm, iov, in, 0);
67 		virt_queue__set_used_elem(vq, head, len);
68 		cdev.vdev.ops->signal_vq(kvm, &cdev.vdev, vq - cdev.vqs);
69 	}
70 
71 	mutex_unlock(&cdev.mutex);
72 }
73 
virtio_console__inject_interrupt(struct kvm * kvm)74 void virtio_console__inject_interrupt(struct kvm *kvm)
75 {
76 	if (kvm->cfg.active_console != CONSOLE_VIRTIO)
77 		return;
78 
79 	mutex_lock(&cdev.mutex);
80 	if (cdev.vq_ready)
81 		thread_pool__do_job(&cdev.jobs[VIRTIO_CONSOLE_RX_QUEUE]);
82 	mutex_unlock(&cdev.mutex);
83 }
84 
virtio_console_handle_callback(struct kvm * kvm,void * param)85 static void virtio_console_handle_callback(struct kvm *kvm, void *param)
86 {
87 	struct iovec iov[VIRTIO_CONSOLE_QUEUE_SIZE];
88 	struct virt_queue *vq;
89 	u16 out, in;
90 	u16 head;
91 	u32 len;
92 
93 	vq = param;
94 
95 	/*
96 	 * The current Linux implementation polls for the buffer
97 	 * to be used, rather than waiting for an interrupt.
98 	 * So there is no need to inject an interrupt for the tx path.
99 	 */
100 
101 	while (virt_queue__available(vq)) {
102 		head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
103 		len = term_putc_iov(iov, out, 0);
104 		virt_queue__set_used_elem(vq, head, len);
105 	}
106 
107 }
108 
get_config(struct kvm * kvm,void * dev)109 static u8 *get_config(struct kvm *kvm, void *dev)
110 {
111 	struct con_dev *cdev = dev;
112 
113 	return ((u8 *)(&cdev->config));
114 }
115 
get_config_size(struct kvm * kvm,void * dev)116 static size_t get_config_size(struct kvm *kvm, void *dev)
117 {
118 	struct con_dev *cdev = dev;
119 
120 	return sizeof(cdev->config);
121 }
122 
get_host_features(struct kvm * kvm,void * dev)123 static u64 get_host_features(struct kvm *kvm, void *dev)
124 {
125 	return 1 << VIRTIO_F_ANY_LAYOUT;
126 }
127 
/* On config-ready status, populate the console config in guest endianness. */
static void notify_status(struct kvm *kvm, void *dev, u32 status)
{
	struct con_dev *con = dev;
	struct virtio_console_config *conf = &con->config;

	if (status & VIRTIO__STATUS_CONFIG) {
		/* Report a single 80x24 console port. */
		conf->cols = virtio_host_to_guest_u16(con->vdev.endian, 80);
		conf->rows = virtio_host_to_guest_u16(con->vdev.endian, 24);
		conf->max_nr_ports = virtio_host_to_guest_u32(con->vdev.endian, 1);
	}
}
140 
/* Initialize queue @vq and attach its thread-pool job; RX also flips vq_ready. */
static int init_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct virt_queue *queue;

	BUG_ON(vq >= VIRTIO_CONSOLE_NUM_QUEUES);

	/* The guest clearly has virtio-console support; drop the compat warning. */
	compat__remove_message(compat_id);

	queue = &cdev.vqs[vq];
	virtio_init_device_vq(kvm, &cdev.vdev, queue, VIRTIO_CONSOLE_QUEUE_SIZE);

	switch (vq) {
	case VIRTIO_CONSOLE_TX_QUEUE:
		thread_pool__init_job(&cdev.jobs[vq], kvm,
				      virtio_console_handle_callback, queue);
		break;
	case VIRTIO_CONSOLE_RX_QUEUE:
		thread_pool__init_job(&cdev.jobs[vq], kvm,
				      virtio_console__inject_interrupt_callback, queue);
		/* Tell the waiting poll thread that we're ready to go */
		mutex_lock(&cdev.mutex);
		cdev.vq_ready = 1;
		mutex_unlock(&cdev.mutex);
		break;
	}

	return 0;
}
165 
/* Tear down queue @vq: quiesce RX injection if needed, then cancel its job. */
static void exit_vq(struct kvm *kvm, void *dev, u32 vq)
{
	switch (vq) {
	case VIRTIO_CONSOLE_RX_QUEUE:
		/* Stop further injections before cancelling the job. */
		mutex_lock(&cdev.mutex);
		cdev.vq_ready = 0;
		mutex_unlock(&cdev.mutex);
		thread_pool__cancel_job(&cdev.jobs[vq]);
		break;
	case VIRTIO_CONSOLE_TX_QUEUE:
		thread_pool__cancel_job(&cdev.jobs[vq]);
		break;
	}
}
177 
/* Guest kick: defer queue processing to the matching thread-pool job. */
static int notify_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct con_dev *con = dev;

	thread_pool__do_job(&con->jobs[vq]);

	return 0;
}
186 
/* Return the virt_queue structure backing queue index @vq. */
static struct virt_queue *get_vq(struct kvm *kvm, void *dev, u32 vq)
{
	struct con_dev *con = dev;

	return &con->vqs[vq];
}
193 
/* All console queues share one fixed size. */
static int get_size_vq(struct kvm *kvm, void *dev, u32 vq)
{
	return VIRTIO_CONSOLE_QUEUE_SIZE;
}
198 
/* Queue resizing is not implemented; echo the requested size back. */
static int set_size_vq(struct kvm *kvm, void *dev, u32 vq, int size)
{
	/* FIXME: dynamic */
	return size;
}
204 
get_vq_count(struct kvm * kvm,void * dev)205 static unsigned int get_vq_count(struct kvm *kvm, void *dev)
206 {
207 	return VIRTIO_CONSOLE_NUM_QUEUES;
208 }
209 
/* virtio transport callbacks for the console device. */
static struct virtio_ops con_dev_virtio_ops = {
	.get_config		= get_config,
	.get_config_size	= get_config_size,
	.get_host_features	= get_host_features,
	.get_vq_count		= get_vq_count,
	.init_vq		= init_vq,
	.exit_vq		= exit_vq,
	.notify_status		= notify_status,
	.notify_vq		= notify_vq,
	.get_vq			= get_vq,
	.get_size_vq		= get_size_vq,
	.set_size_vq		= set_size_vq,
};
223 
virtio_console__init(struct kvm * kvm)224 int virtio_console__init(struct kvm *kvm)
225 {
226 	int r;
227 
228 	if (kvm->cfg.active_console != CONSOLE_VIRTIO)
229 		return 0;
230 
231 	r = virtio_init(kvm, &cdev, &cdev.vdev, &con_dev_virtio_ops,
232 			kvm->cfg.virtio_transport, PCI_DEVICE_ID_VIRTIO_CONSOLE,
233 			VIRTIO_ID_CONSOLE, PCI_CLASS_CONSOLE);
234 	if (r < 0)
235 		return r;
236 
237 	if (compat_id == -1)
238 		compat_id = virtio_compat_add_message("virtio-console", "CONFIG_VIRTIO_CONSOLE");
239 
240 	return 0;
241 }
242 virtio_dev_init(virtio_console__init);
243 
virtio_console__exit(struct kvm * kvm)244 int virtio_console__exit(struct kvm *kvm)
245 {
246 	virtio_exit(kvm, &cdev.vdev);
247 
248 	return 0;
249 }
250 virtio_dev_exit(virtio_console__exit);
251