// SPDX-License-Identifier: GPL-2.0
/*
 * uio_hv_generic - generic UIO driver for VMBus
 *
 * Copyright (c) 2013-2016 Brocade Communications Systems, Inc.
 * Copyright (c) 2016, Microsoft Corporation.
 *
 * Since the driver does not declare any device ids, you must allocate
 * an id and bind the device to the driver yourself.  For example:
 *
 * Associate Network GUID with UIO device
 * # echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
 *    > /sys/bus/vmbus/drivers/uio_hv_generic/new_id
 * Then rebind
 * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
 *    > /sys/bus/vmbus/drivers/hv_netvsc/unbind
 * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
 *    > /sys/bus/vmbus/drivers/uio_hv_generic/bind
 */
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/device.h>
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/uio_driver.h>
26 #include <linux/netdevice.h>
27 #include <linux/if_ether.h>
28 #include <linux/skbuff.h>
29 #include <linux/hyperv.h>
30 #include <linux/vmalloc.h>
31 #include <linux/slab.h>
32
33 #include "../hv/hyperv_vmbus.h"
34
#define DRIVER_VERSION	"0.02.1"
#define DRIVER_AUTHOR	"Stephen Hemminger <sthemmin at microsoft.com>"
#define DRIVER_DESC	"Generic UIO driver for VMBus devices"

/* Sizes of the preallocated GPADL buffers exposed for HV_NIC devices */
#define SEND_BUFFER_SIZE (16 * 1024 * 1024)
#define RECV_BUFFER_SIZE (31 * 1024 * 1024)
41
/*
 * List of resources to be mapped to user space
 * can be extended up to MAX_UIO_MAPS(5) items
 */
enum hv_uio_map {
	TXRX_RING_MAP = 0,	/* primary channel ring buffer */
	INT_PAGE_MAP,		/* VMBus interrupt page */
	MON_PAGE_MAP,		/* VMBus monitor page */
	RECV_BUF_MAP,		/* receive GPADL buffer (HV_NIC only) */
	SEND_BUF_MAP		/* send GPADL buffer (HV_NIC only) */
};
53
/* Per-device driver state, allocated in probe and stored as drvdata. */
struct hv_uio_private_data {
	struct uio_info info;		/* registered with the UIO core */
	struct hv_device *device;	/* back-pointer to the VMBus device */
	atomic_t refcnt;		/* open count; ring connected on first open */

	void *recv_buf;			/* vzalloc'd receive area (HV_NIC) */
	struct vmbus_gpadl recv_gpadl;	/* host mapping of recv_buf */
	char recv_name[32];		/* "recv_4294967295" */

	void *send_buf;			/* vzalloc'd send area (HV_NIC) */
	struct vmbus_gpadl send_gpadl;	/* host mapping of send_buf */
	char send_name[32];
};
67
/*
 * Mask or unmask interrupts on one channel. When enabling on a channel
 * without a monitor page, the host must be signalled explicitly.
 */
static void set_event(struct vmbus_channel *channel, s32 irq_state)
{
	channel->inbound.ring_buffer->interrupt_mask = !irq_state;

	if (channel->offermsg.monitor_allocated || !irq_state)
		return;

	/* MB is needed for host to see the interrupt mask first */
	virt_mb();
	vmbus_set_event(channel);
}
77
78 /*
79 * This is the irqcontrol callback to be registered to uio_info.
80 * It can be used to disable/enable interrupt from user space processes.
81 *
82 * @param info
83 * pointer to uio_info.
84 * @param irq_state
85 * state value. 1 to enable interrupt, 0 to disable interrupt.
86 */
87 static int
hv_uio_irqcontrol(struct uio_info * info,s32 irq_state)88 hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
89 {
90 struct hv_uio_private_data *pdata = info->priv;
91 struct hv_device *dev = pdata->device;
92 struct vmbus_channel *primary, *sc;
93
94 primary = dev->channel;
95 set_event(primary, irq_state);
96
97 mutex_lock(&vmbus_connection.channel_mutex);
98 list_for_each_entry(sc, &primary->sc_list, sc_list)
99 set_event(sc, irq_state);
100 mutex_unlock(&vmbus_connection.channel_mutex);
101
102 return 0;
103 }
104
105 /*
106 * Callback from vmbus_event when something is in inbound ring.
107 */
hv_uio_channel_cb(void * context)108 static void hv_uio_channel_cb(void *context)
109 {
110 struct vmbus_channel *chan = context;
111 struct hv_device *hv_dev;
112 struct hv_uio_private_data *pdata;
113
114 virt_mb();
115
116 /*
117 * The callback may come from a subchannel, in which case look
118 * for the hv device in the primary channel
119 */
120 hv_dev = chan->primary_channel ?
121 chan->primary_channel->device_obj : chan->device_obj;
122 pdata = hv_get_drvdata(hv_dev);
123 uio_event_notify(&pdata->info);
124 }
125
126 /*
127 * Callback from vmbus_event when channel is rescinded.
128 * It is meant for rescind of primary channels only.
129 */
hv_uio_rescind(struct vmbus_channel * channel)130 static void hv_uio_rescind(struct vmbus_channel *channel)
131 {
132 struct hv_device *hv_dev = channel->device_obj;
133 struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
134
135 /*
136 * Turn off the interrupt file handle
137 * Next read for event will return -EIO
138 */
139 pdata->info.irq = 0;
140
141 /* Wake up reader */
142 uio_event_notify(&pdata->info);
143
144 /*
145 * With rescind callback registered, rescind path will not unregister the device
146 * from vmbus when the primary channel is rescinded.
147 * Without it, rescind handling is incomplete and next onoffer msg does not come.
148 * Unregister the device from vmbus here.
149 */
150 vmbus_device_unregister(channel->device_obj);
151 }
152
153 /* Function used for mmap of ring buffer sysfs interface.
154 * The ring buffer is allocated as contiguous memory by vmbus_open
155 */
156 static int
hv_uio_ring_mmap(struct vmbus_channel * channel,struct vm_area_struct * vma)157 hv_uio_ring_mmap(struct vmbus_channel *channel, struct vm_area_struct *vma)
158 {
159 void *ring_buffer = page_address(channel->ringbuffer_page);
160
161 if (channel->state != CHANNEL_OPENED_STATE)
162 return -ENODEV;
163
164 return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
165 channel->ringbuffer_pagecount << PAGE_SHIFT);
166 }
167
168 /* Callback from VMBUS subsystem when new channel created. */
169 static void
hv_uio_new_channel(struct vmbus_channel * new_sc)170 hv_uio_new_channel(struct vmbus_channel *new_sc)
171 {
172 struct hv_device *hv_dev = new_sc->primary_channel->device_obj;
173 struct device *device = &hv_dev->device;
174 const size_t ring_bytes = SZ_2M;
175 int ret;
176
177 /* Create host communication ring */
178 ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0,
179 hv_uio_channel_cb, new_sc);
180 if (ret) {
181 dev_err(device, "vmbus_open subchannel failed: %d\n", ret);
182 return;
183 }
184
185 set_channel_read_mode(new_sc, HV_CALL_ISR);
186 ret = hv_create_ring_sysfs(new_sc, hv_uio_ring_mmap);
187 if (ret) {
188 dev_err(device, "sysfs create ring bin file failed; %d\n", ret);
189 vmbus_close(new_sc);
190 }
191 }
192
193 /* free the reserved buffers for send and receive */
194 static void
hv_uio_cleanup(struct hv_device * dev,struct hv_uio_private_data * pdata)195 hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
196 {
197 if (pdata->send_gpadl.gpadl_handle) {
198 vmbus_teardown_gpadl(dev->channel, &pdata->send_gpadl);
199 if (!pdata->send_gpadl.decrypted)
200 vfree(pdata->send_buf);
201 }
202
203 if (pdata->recv_gpadl.gpadl_handle) {
204 vmbus_teardown_gpadl(dev->channel, &pdata->recv_gpadl);
205 if (!pdata->recv_gpadl.decrypted)
206 vfree(pdata->recv_buf);
207 }
208 }
209
210 /* VMBus primary channel is opened on first use */
211 static int
hv_uio_open(struct uio_info * info,struct inode * inode)212 hv_uio_open(struct uio_info *info, struct inode *inode)
213 {
214 struct hv_uio_private_data *pdata
215 = container_of(info, struct hv_uio_private_data, info);
216 struct hv_device *dev = pdata->device;
217 int ret;
218
219 if (atomic_inc_return(&pdata->refcnt) != 1)
220 return 0;
221
222 vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
223 vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
224
225 ret = vmbus_connect_ring(dev->channel,
226 hv_uio_channel_cb, dev->channel);
227 if (ret)
228 atomic_dec(&pdata->refcnt);
229
230 return ret;
231 }
232
233 /* VMBus primary channel is closed on last close */
234 static int
hv_uio_release(struct uio_info * info,struct inode * inode)235 hv_uio_release(struct uio_info *info, struct inode *inode)
236 {
237 struct hv_uio_private_data *pdata
238 = container_of(info, struct hv_uio_private_data, info);
239 struct hv_device *dev = pdata->device;
240 int ret = 0;
241
242 if (atomic_dec_and_test(&pdata->refcnt))
243 ret = vmbus_disconnect_ring(dev->channel);
244
245 return ret;
246 }
247
/*
 * Probe a VMBus device bound to this driver.
 *
 * Allocates the primary ring buffer, exposes the ring and the VMBus
 * interrupt/monitor pages to user space through UIO mem maps, and for
 * HV_NIC devices additionally establishes the send/receive GPADL
 * buffers. Returns 0 on success or a negative errno.
 */
static int
hv_uio_probe(struct hv_device *dev,
	     const struct hv_vmbus_device_id *dev_id)
{
	struct vmbus_channel *channel = dev->channel;
	struct hv_uio_private_data *pdata;
	void *ring_buffer;
	int ret;
	size_t ring_size = hv_dev_ring_size(channel);

	if (!ring_size)
		ring_size = SZ_2M;

	/* Adjust ring size if necessary to have it page aligned */
	ring_size = VMBUS_RING_SIZE(ring_size);

	pdata = devm_kzalloc(&dev->device, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	ret = vmbus_alloc_ring(channel, ring_size, ring_size);
	if (ret)
		return ret;

	set_channel_read_mode(channel, HV_CALL_ISR);

	/* Fill general uio info */
	pdata->info.name = "uio_hv_generic";
	pdata->info.version = DRIVER_VERSION;
	pdata->info.irqcontrol = hv_uio_irqcontrol;
	pdata->info.open = hv_uio_open;
	pdata->info.release = hv_uio_release;
	pdata->info.irq = UIO_IRQ_CUSTOM;
	atomic_set(&pdata->refcnt, 0);

	/* mem resources */
	pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
	ring_buffer = page_address(channel->ringbuffer_page);
	pdata->info.mem[TXRX_RING_MAP].addr
		= (uintptr_t)virt_to_phys(ring_buffer);
	pdata->info.mem[TXRX_RING_MAP].size
		= channel->ringbuffer_pagecount << PAGE_SHIFT;
	pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_IOVA;

	pdata->info.mem[INT_PAGE_MAP].name = "int_page";
	pdata->info.mem[INT_PAGE_MAP].addr
		= (uintptr_t)vmbus_connection.int_page;
	pdata->info.mem[INT_PAGE_MAP].size = HV_HYP_PAGE_SIZE;
	pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;

	pdata->info.mem[MON_PAGE_MAP].name = "monitor_page";
	pdata->info.mem[MON_PAGE_MAP].addr
		= (uintptr_t)vmbus_connection.monitor_pages[1];
	pdata->info.mem[MON_PAGE_MAP].size = HV_HYP_PAGE_SIZE;
	pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;

	/* Network devices also get preallocated send/receive buffers */
	if (channel->device_id == HV_NIC) {
		pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
		if (!pdata->recv_buf) {
			ret = -ENOMEM;
			goto fail_free_ring;
		}

		ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
					    RECV_BUFFER_SIZE, &pdata->recv_gpadl);
		if (ret) {
			/* still encrypted means not shared with host: safe to free */
			if (!pdata->recv_gpadl.decrypted)
				vfree(pdata->recv_buf);
			goto fail_close;
		}

		/* put Global Physical Address Label in name */
		snprintf(pdata->recv_name, sizeof(pdata->recv_name),
			 "recv:%u", pdata->recv_gpadl.gpadl_handle);
		pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
		pdata->info.mem[RECV_BUF_MAP].addr = (uintptr_t)pdata->recv_buf;
		pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
		pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;

		pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
		if (!pdata->send_buf) {
			ret = -ENOMEM;
			goto fail_close;
		}

		ret = vmbus_establish_gpadl(channel, pdata->send_buf,
					    SEND_BUFFER_SIZE, &pdata->send_gpadl);
		if (ret) {
			if (!pdata->send_gpadl.decrypted)
				vfree(pdata->send_buf);
			goto fail_close;
		}

		snprintf(pdata->send_name, sizeof(pdata->send_name),
			 "send:%u", pdata->send_gpadl.gpadl_handle);
		pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
		pdata->info.mem[SEND_BUF_MAP].addr = (uintptr_t)pdata->send_buf;
		pdata->info.mem[SEND_BUF_MAP].size = SEND_BUFFER_SIZE;
		pdata->info.mem[SEND_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
	}

	pdata->info.priv = pdata;
	pdata->device = dev;

	ret = uio_register_device(&dev->device, &pdata->info);
	if (ret) {
		dev_err(&dev->device, "hv_uio register failed\n");
		goto fail_close;
	}

	/*
	 * This internally calls sysfs_update_group, which returns a non-zero value if it executes
	 * before sysfs_create_group. This is expected as the 'ring' will be created later in
	 * vmbus_device_register() -> vmbus_add_channel_kobj(). Thus, no need to check the return
	 * value and print warning.
	 *
	 * Creating/exposing sysfs in driver probe is not encouraged as it can lead to race
	 * conditions with userspace. For backward compatibility, "ring" sysfs could not be removed
	 * or decoupled from uio_hv_generic probe. Userspace programs can make use of inotify
	 * APIs to make sure that ring is created.
	 */
	hv_create_ring_sysfs(channel, hv_uio_ring_mmap);

	hv_set_drvdata(dev, pdata);

	return 0;

fail_close:
	/* tears down any GPADLs established so far */
	hv_uio_cleanup(dev, pdata);
fail_free_ring:
	vmbus_free_ring(dev->channel);

	return ret;
}
382
383 static void
hv_uio_remove(struct hv_device * dev)384 hv_uio_remove(struct hv_device *dev)
385 {
386 struct hv_uio_private_data *pdata = hv_get_drvdata(dev);
387
388 if (!pdata)
389 return;
390
391 hv_remove_ring_sysfs(dev->channel);
392 uio_unregister_device(&pdata->info);
393 hv_uio_cleanup(dev, pdata);
394
395 vmbus_free_ring(dev->channel);
396 }
397
/* No static device ids: devices are bound via sysfs new_id/bind. */
static struct hv_driver hv_uio_drv = {
	.name = "uio_hv_generic",
	.id_table = NULL, /* only dynamic id's */
	.probe = hv_uio_probe,
	.remove = hv_uio_remove,
};
404
/* Register the driver with the VMBus core on module load. */
static int __init
hyperv_module_init(void)
{
	return vmbus_driver_register(&hv_uio_drv);
}
410
/* Unregister the driver on module unload. */
static void __exit
hyperv_module_exit(void)
{
	vmbus_driver_unregister(&hv_uio_drv);
}
416
/* Standard module entry points and metadata */
module_init(hyperv_module_init);
module_exit(hyperv_module_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
424