Lines matching references to "u" (the struct per_user_data * instance) in the Linux Xen event-channel device driver, drivers/xen/evtchn.c:

96 static unsigned int evtchn_ring_offset(struct per_user_data *u,
99 return idx & (u->ring_size - 1);
102 static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u,
105 return u->ring + evtchn_ring_offset(u, idx);
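These two helpers rely on the classic power-of-two ring trick: ring_prod and ring_cons are free-running counters that are never wrapped by hand, and idx & (ring_size - 1) maps any counter value to an array slot. A minimal userspace sketch of the idea (names and the size are mine, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u    /* must be a power of two for the mask to work */

/* Map a free-running index to a slot; RING_SIZE - 1 is an all-ones
 * mask over the low bits, so the AND is a cheap modulo. */
static unsigned int ring_offset(uint32_t idx)
{
        return idx & (RING_SIZE - 1);
}

int main(void)
{
        /* The offsets stay correct even as the 32-bit counter overflows. */
        for (uint32_t idx = UINT32_MAX - 2; idx != 5; idx++)
                printf("idx=%u -> slot %u\n", idx, ring_offset(idx));
        return 0;
}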
108 static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
110 struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;
112 u->nr_evtchns++;
130 rb_insert_color(&evtchn->node, &u->evtchns);
135 static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
137 u->nr_evtchns--;
138 rb_erase(&evtchn->node, &u->evtchns);
142 static struct user_evtchn *find_evtchn(struct per_user_data *u,
145 struct rb_node *node = u->evtchns.rb_node;
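Each per_user_data tracks its bound ports in a red-black tree (u->evtchns) keyed by port, so add_evtchn(), del_evtchn() and find_evtchn() are all O(log n), and inserting a duplicate port fails (the driver returns -EEXIST). A simplified model of the same bookkeeping using a plain binary search tree instead of the kernel's rb_node/rb_insert_color API (struct layout and names are mine):

#include <stddef.h>

struct user_evtchn {            /* toy model, not the kernel struct */
        unsigned int port;
        struct user_evtchn *left, *right;
};

/* Walk down to the insertion point, keyed by port; -1 on duplicate. */
static int add_evtchn(struct user_evtchn **root, struct user_evtchn *e)
{
        while (*root) {
                if (e->port < (*root)->port)
                        root = &(*root)->left;
                else if (e->port > (*root)->port)
                        root = &(*root)->right;
                else
                        return -1;      /* port already bound */
        }
        *root = e;
        return 0;
}

static struct user_evtchn *find_evtchn(struct user_evtchn *node,
                                       unsigned int port)
{
        while (node) {
                if (port < node->port)
                        node = node->left;
                else if (port > node->port)
                        node = node->right;
                else
                        return node;
        }
        return NULL;
}

int main(void)
{
        struct user_evtchn a = { .port = 3 }, b = { .port = 5 };
        struct user_evtchn *root = NULL;

        add_evtchn(&root, &a);
        add_evtchn(&root, &b);
        return find_evtchn(root, 5) == &b ? 0 : 1;
}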
165 struct per_user_data *u = evtchn->user;
173 "Interrupt for port %u, but apparently not enabled; per-user %p\n",
174 evtchn->port, u);
178 spin_lock(&u->ring_prod_lock);
180 prod = READ_ONCE(u->ring_prod);
181 cons = READ_ONCE(u->ring_cons);
183 if ((prod - cons) < u->ring_size) {
184 *evtchn_ring_entry(u, prod) = evtchn->port;
186 WRITE_ONCE(u->ring_prod, prod + 1);
188 wake_up_interruptible(&u->evtchn_wait);
189 kill_fasync(&u->evtchn_async_queue,
193 u->ring_overflow = 1;
195 spin_unlock(&u->ring_prod_lock);
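The interrupt handler is the ring's only producer. Under ring_prod_lock it checks fullness with the free-running counters ((prod - cons) < ring_size stays correct across 32-bit wrap), stores the port, then publishes the new producer index with WRITE_ONCE so the reader, which samples the counters locklessly, never sees the index advance before the data; on a full ring it latches ring_overflow rather than dropping the event silently, then wakes blocked readers and SIGIO listeners. A userspace model, with C11 atomics standing in for READ_ONCE/WRITE_ONCE plus the kernel's write barrier, a mutex for the spinlock, and the wake-up step elided:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 8u                    /* power of two */

static uint32_t ring[RING_SIZE];
static _Atomic uint32_t ring_prod, ring_cons;
static int ring_overflow;
static pthread_mutex_t prod_lock = PTHREAD_MUTEX_INITIALIZER;

/* Model of the driver's interrupt path: single producer under the lock. */
static void produce(uint32_t port)
{
        pthread_mutex_lock(&prod_lock);
        uint32_t prod = atomic_load_explicit(&ring_prod, memory_order_relaxed);
        uint32_t cons = atomic_load_explicit(&ring_cons, memory_order_relaxed);

        if (prod - cons < RING_SIZE) {
                ring[prod & (RING_SIZE - 1)] = port;
                /* Release: the slot contents become visible before the
                 * new producer index does. */
                atomic_store_explicit(&ring_prod, prod + 1,
                                      memory_order_release);
        } else {
                ring_overflow = 1;      /* latched; read() reports an error */
        }
        pthread_mutex_unlock(&prod_lock);
}

int main(void)
{
        produce(42);
        return 0;
}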
205 struct per_user_data *u = file->private_data;
217 mutex_lock(&u->ring_cons_mutex);
220 if (u->ring_overflow)
223 c = READ_ONCE(u->ring_cons);
224 p = READ_ONCE(u->ring_prod);
228 mutex_unlock(&u->ring_cons_mutex);
233 rc = wait_event_interruptible(u->evtchn_wait,
234 READ_ONCE(u->ring_cons) != READ_ONCE(u->ring_prod));
240 if (((c ^ p) & u->ring_size) != 0) {
241 bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) *
243 bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
259 if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
261 copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
264 WRITE_ONCE(u->ring_cons, c + (bytes1 + bytes2) / sizeof(evtchn_port_t));
268 mutex_unlock(&u->ring_cons_mutex);
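The read() side is the only consumer. Since c and p are free-running and ring_size is a power of two, ((c ^ p) & ring_size) tests whether a ring_size boundary lies between them, i.e. whether the pending entries wrap past the end of the array; if so the copy is split into a tail chunk and a head chunk, and afterwards ring_cons is advanced by exactly the number of entries copied (line 264). A small sketch of the arithmetic with hypothetical counter values:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u
#define PORT_SZ   sizeof(uint32_t)

static unsigned int ring_offset(uint32_t idx) { return idx & (RING_SIZE - 1); }

int main(void)
{
        uint32_t c = 6, p = 10;         /* 4 pending entries, wrapping at 8 */
        size_t bytes1, bytes2;

        if (((c ^ p) & RING_SIZE) != 0) {
                /* Wrapped: slots 6..7 first, then slots 0..1. */
                bytes1 = (RING_SIZE - ring_offset(c)) * PORT_SZ;
                bytes2 = ring_offset(p) * PORT_SZ;
        } else {
                bytes1 = (p - c) * PORT_SZ;
                bytes2 = 0;
        }
        printf("bytes1=%zu bytes2=%zu\n", bytes1, bytes2);  /* 8 and 8 */
        return 0;
}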
277 struct per_user_data *u = file->private_data;
296 mutex_lock(&u->bind_mutex);
302 evtchn = find_evtchn(u, port);
309 mutex_unlock(&u->bind_mutex);
318 static int evtchn_resize_ring(struct per_user_data *u)
327 if (u->nr_evtchns <= u->ring_size)
330 if (u->ring_size == 0)
333 new_size = 2 * u->ring_size;
339 old_ring = u->ring;
345 mutex_lock(&u->ring_cons_mutex);
346 spin_lock_irq(&u->ring_prod_lock);
359 memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
360 memcpy(new_ring + u->ring_size, old_ring,
361 u->ring_size * sizeof(*u->ring));
363 u->ring = new_ring;
364 u->ring_size = new_size;
366 spin_unlock_irq(&u->ring_prod_lock);
367 mutex_unlock(&u->ring_cons_mutex);
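evtchn_resize_ring() doubles the ring while entries may still be pending, and the double memcpy on lines 359-361 is what makes that safe: because indices are masked rather than reset, duplicating the old contents back to back guarantees new_ring[idx & (new_size - 1)] == old_ring[idx & (old_size - 1)] for every index, so ring_cons and ring_prod remain valid unchanged. Both ring locks are held across the swap (lines 345-346), so neither producer nor consumer can observe a half-updated ring. A sketch demonstrating the invariant (the values are mine):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint32_t old_ring[4] = { 40, 41, 42, 43 };
        uint32_t new_ring[8];
        uint32_t cons = 6, prod = 9;    /* 3 pending entries: slots 2, 3, 0 */

        /* Copy the old contents twice, back to back, as the driver does. */
        memcpy(new_ring, old_ring, sizeof(old_ring));
        memcpy(new_ring + 4, old_ring, sizeof(old_ring));

        /* Every pending index resolves to the same value under the new,
         * wider mask as it did under the old one. */
        for (uint32_t i = cons; i != prod; i++)
                printf("idx %u: old=%u new=%u\n", i,
                       old_ring[i & 3], new_ring[i & 7]);
        return 0;
}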
374 static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port,
393 evtchn->user = u;
397 rc = add_evtchn(u, evtchn);
401 rc = evtchn_resize_ring(u);
406 u->name, evtchn);
418 del_evtchn(u, evtchn);
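The bind path inserts into the tree first, then grows the ring, then requests the IRQ (bind_evtchn_to_irqhandler_lateeoi() in recent kernels, passing u->name and the evtchn as seen on line 406); if any later step fails, the error path on line 418 removes the node again so the tree and nr_evtchns never overcount. The unwind shape, sketched with stub helpers (all names here are hypothetical stand-ins, not the driver's):

#include <stdio.h>

/* Stubs standing in for add_evtchn(), evtchn_resize_ring() and the
 * IRQ request; they exist only to make the unwind order concrete. */
static int tree_insert(unsigned int port) { (void)port; return 0; }
static void tree_remove(unsigned int port) { (void)port; }
static int grow_ring(void) { return 0; }
static int request_irq_for(unsigned int port) { (void)port; return -1; }

static int bind_port(unsigned int port)
{
        int rc = tree_insert(port);
        if (rc)
                return rc;

        rc = grow_ring();
        if (rc)
                goto err;

        rc = request_irq_for(port);
        if (rc < 0)
                goto err;
        return 0;

err:
        tree_remove(port);              /* undo the insert on any failure */
        return rc;
}

int main(void)
{
        printf("bind_port -> %d\n", bind_port(7));
        return 0;
}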
422 static void evtchn_unbind_from_user(struct per_user_data *u,
432 del_evtchn(u, evtchn);
439 struct per_user_data *u = file->private_data;
443 mutex_lock(&u->bind_mutex);
451 if (u->restrict_domid != UNRESTRICTED_DOMID)
465 rc = evtchn_bind_to_user(u, bind_virq.port, false);
480 if (u->restrict_domid != UNRESTRICTED_DOMID &&
481 u->restrict_domid != bind.remote_domain)
491 rc = evtchn_bind_to_user(u, bind_interdomain.local_port, false);
502 if (u->restrict_domid != UNRESTRICTED_DOMID)
516 rc = evtchn_bind_to_user(u, alloc_unbound.port, false);
535 evtchn = find_evtchn(u, unbind.port);
540 evtchn_unbind_from_user(u, evtchn);
554 evtchn = find_evtchn(u, bind.port);
558 rc = evtchn_bind_to_user(u, bind.port, true);
571 evtchn = find_evtchn(u, notify.port);
581 mutex_lock(&u->ring_cons_mutex);
582 spin_lock_irq(&u->ring_prod_lock);
583 WRITE_ONCE(u->ring_cons, 0);
584 WRITE_ONCE(u->ring_prod, 0);
585 u->ring_overflow = 0;
586 spin_unlock_irq(&u->ring_prod_lock);
587 mutex_unlock(&u->ring_cons_mutex);
596 if (u->restrict_domid != UNRESTRICTED_DOMID)
607 u->restrict_domid = ierd.domid;
617 mutex_unlock(&u->bind_mutex);
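Every ioctl serializes on u->bind_mutex, and the restrict_domid checks (lines 451, 480-481, 502 and 596) make IOCTL_EVTCHN_RESTRICT_DOMID one-way: once a domid is set, bindings involving any other domain fail. From userspace the device is driven with the ioctls declared in the uapi header xen/evtchn.h; a minimal usage sketch, with error handling mostly elided, assuming a Xen system that exposes /dev/xen/evtchn:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/evtchn.h>         /* IOCTL_EVTCHN_*, struct ioctl_evtchn_* */

int main(void)
{
        int fd = open("/dev/xen/evtchn", O_RDWR);
        if (fd < 0)
                return 1;

        /* Allocate an unbound port that the named domain may connect to;
         * the ioctl returns the port number on success. */
        struct ioctl_evtchn_bind_unbound_port bind = { .remote_domain = 0 };
        int port = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
        if (port < 0)
                return 1;

        /* read() drains pending port numbers from the per-user ring. */
        unsigned int pending;
        if (read(fd, &pending, sizeof(pending)) == sizeof(pending)) {
                printf("event on port %u\n", pending);
                /* Delivery on a port stays disabled until the port number
                 * is written back (the driver's write path). */
                write(fd, &pending, sizeof(pending));
        }

        close(fd);
        return 0;
}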
625 struct per_user_data *u = file->private_data;
627 poll_wait(file, &u->evtchn_wait, wait);
628 if (READ_ONCE(u->ring_cons) != READ_ONCE(u->ring_prod))
630 if (u->ring_overflow)
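evtchn_poll() reports readability as soon as ring_prod and ring_cons differ, and an error once ring_overflow is set; overflow is only cleared by IOCTL_EVTCHN_RESET (lines 581-587), which empties the ring under both ring locks. A usage sketch:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/xen/evtchn", O_RDWR);
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        /* POLLIN when ring_prod != ring_cons; POLLERR once ring_overflow
         * is latched (clear it with IOCTL_EVTCHN_RESET). */
        if (poll(&pfd, 1, 5000) > 0) {
                if (pfd.revents & POLLERR)
                        fprintf(stderr, "ring overflow\n");
                else if (pfd.revents & POLLIN)
                        printf("events pending\n");
        }
        close(fd);
        return 0;
}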
637 struct per_user_data *u = filp->private_data;
638 return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
643 struct per_user_data *u;
645 u = kzalloc(sizeof(*u), GFP_KERNEL);
646 if (u == NULL)
649 u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
650 if (u->name == NULL) {
651 kfree(u);
655 init_waitqueue_head(&u->evtchn_wait);
657 mutex_init(&u->bind_mutex);
658 mutex_init(&u->ring_cons_mutex);
659 spin_lock_init(&u->ring_prod_lock);
661 u->restrict_domid = UNRESTRICTED_DOMID;
663 filp->private_data = u;
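evtchn_open() follows the usual allocate-and-unwind shape: the per_user_data is zero-allocated, the name is derived from the calling task's comm, a kasprintf() failure frees the half-built structure before returning, and every wait queue and lock is initialized before the fd becomes usable. The same shape in plain libc, with toy names:

#define _GNU_SOURCE             /* for asprintf() */
#include <stdio.h>
#include <stdlib.h>

struct per_user {               /* toy stand-in for per_user_data */
        char *name;
};

static struct per_user *user_open(const char *comm)
{
        struct per_user *u = calloc(1, sizeof(*u));
        if (!u)
                return NULL;

        if (asprintf(&u->name, "evtchn:%s", comm) < 0) {
                free(u);        /* same shape as kfree(u) on kasprintf failure */
                return NULL;
        }
        return u;
}

int main(void)
{
        struct per_user *u = user_open("demo");
        if (u) {
                printf("%s\n", u->name);
                free(u->name);
                free(u);
        }
        return 0;
}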
670 struct per_user_data *u = filp->private_data;
673 while ((node = u->evtchns.rb_node)) {
678 evtchn_unbind_from_user(u, evtchn);
681 evtchn_free_ring(u->ring);
682 kfree(u->name);
683 kfree(u);