Lines Matching full:call
2 /* incoming call handling
25 static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call, in rxrpc_dummy_notify() argument
31 * Preallocate a single service call, connection and peer and, if possible,
42 struct rxrpc_call *call, *xcall; in rxrpc_service_prealloc_one() local
100 call = rxrpc_alloc_call(rx, gfp, debug_id); in rxrpc_service_prealloc_one()
101 if (!call) in rxrpc_service_prealloc_one()
103 call->flags |= (1 << RXRPC_CALL_IS_SERVICE); in rxrpc_service_prealloc_one()
104 call->state = RXRPC_CALL_SERVER_PREALLOC; in rxrpc_service_prealloc_one()
106 trace_rxrpc_call(call->debug_id, rxrpc_call_new_service, in rxrpc_service_prealloc_one()
107 atomic_read(&call->usage), in rxrpc_service_prealloc_one()
126 call->user_call_ID = user_call_ID; in rxrpc_service_prealloc_one()
127 call->notify_rx = notify_rx; in rxrpc_service_prealloc_one()
129 rxrpc_get_call(call, rxrpc_call_got_kernel); in rxrpc_service_prealloc_one()
130 user_attach_call(call, user_call_ID); in rxrpc_service_prealloc_one()
133 rxrpc_get_call(call, rxrpc_call_got_userid); in rxrpc_service_prealloc_one()
134 rb_link_node(&call->sock_node, parent, pp); in rxrpc_service_prealloc_one()
135 rb_insert_color(&call->sock_node, &rx->calls); in rxrpc_service_prealloc_one()
136 set_bit(RXRPC_CALL_HAS_USERID, &call->flags); in rxrpc_service_prealloc_one()
138 list_add(&call->sock_link, &rx->sock_calls); in rxrpc_service_prealloc_one()
142 rxnet = call->rxnet; in rxrpc_service_prealloc_one()
144 list_add_tail(&call->link, &rxnet->calls); in rxrpc_service_prealloc_one()
147 b->call_backlog[call_head] = call; in rxrpc_service_prealloc_one()
149 _leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID); in rxrpc_service_prealloc_one()
154 rxrpc_cleanup_call(call); in rxrpc_service_prealloc_one()
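The fragments above fill one slot of the per-socket backlog ring (b->call_backlog[call_head]). Below is a minimal user-space sketch of that producer pattern, assuming a power-of-two ring whose head index belongs to the preallocator; BACKLOG_MAX, struct backlog and prealloc_one() are names invented for the example, not the kernel's.

/*
 * Illustrative sketch only (not kernel code): the producer side of a
 * fixed-size backlog ring like b->call_backlog[].  A power-of-two size
 * lets the indices wrap with a simple mask.
 */
#include <stdio.h>

#define BACKLOG_MAX 32                     /* must be a power of two */

struct backlog {
        void *slot[BACKLOG_MAX];
        unsigned int head;                 /* next free slot (producer index) */
        unsigned int tail;                 /* next occupied slot (consumer index) */
};

/* Preallocate one entry; fail if the ring is already full. */
static int prealloc_one(struct backlog *b, void *entry)
{
        unsigned int head = b->head, tail = b->tail;

        if (head - tail >= BACKLOG_MAX)    /* ring full: keep what we have */
                return -1;

        b->slot[head & (BACKLOG_MAX - 1)] = entry;
        b->head = head + 1;                /* publish only after the slot is filled */
        return 0;
}

int main(void)
{
        struct backlog b = { .head = 0, .tail = 0 };
        int i, n = 0;

        for (i = 0; i < 40; i++)
                n += (prealloc_one(&b, &b) == 0);
        printf("preallocated %d of 40 requested slots\n", n);   /* prints 32 */
        return 0;
}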
221 struct rxrpc_call *call = b->call_backlog[tail]; in rxrpc_discard_prealloc() local
222 rcu_assign_pointer(call->socket, rx); in rxrpc_discard_prealloc()
224 _debug("discard %lx", call->user_call_ID); in rxrpc_discard_prealloc()
225 rx->discard_new_call(call, call->user_call_ID); in rxrpc_discard_prealloc()
226 if (call->notify_rx) in rxrpc_discard_prealloc()
227 call->notify_rx = rxrpc_dummy_notify; in rxrpc_discard_prealloc()
228 rxrpc_put_call(call, rxrpc_call_put_kernel); in rxrpc_discard_prealloc()
230 rxrpc_call_completed(call); in rxrpc_discard_prealloc()
231 rxrpc_release_call(rx, call); in rxrpc_discard_prealloc()
232 rxrpc_put_call(call, rxrpc_call_put); in rxrpc_discard_prealloc()
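When preallocated kernel-service calls are discarded, the notify_rx hook is redirected to rxrpc_dummy_notify() so that a notification racing in afterwards lands harmlessly. The stand-alone sketch below shows only that defuse-the-callback pattern; struct owner_ctx, notify_fn and the function names are invented for illustration.

/*
 * Sketch (not kernel code): once the owner of a discarded call has been
 * told to forget it, point its notification hook at a no-op before the
 * final reference is dropped.
 */
#include <stdio.h>

struct owner_ctx;
typedef void (*notify_fn)(struct owner_ctx *ctx);

struct owner_ctx {
        notify_fn notify;             /* owner's callback; may still fire late */
};

static void real_notify(struct owner_ctx *ctx)
{
        (void)ctx;
        printf("owner notified\n");
}

static void dummy_notify(struct owner_ctx *ctx)
{
        (void)ctx;                    /* deliberately does nothing */
}

static void discard(struct owner_ctx *ctx)
{
        ctx->notify = dummy_notify;   /* defuse the callback before the final put */
}

int main(void)
{
        struct owner_ctx ctx = { .notify = real_notify };

        ctx.notify(&ctx);             /* normal path: prints "owner notified" */
        discard(&ctx);
        ctx.notify(&ctx);             /* late notification after discard: no-op */
        return 0;
}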
243 static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb) in rxrpc_send_ping() argument
248 if (call->peer->rtt_count < 3 || in rxrpc_send_ping()
249 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) in rxrpc_send_ping()
250 rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, in rxrpc_send_ping()
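rxrpc_send_ping() proposes a ping ACK only while RTT data for the peer is scarce: fewer than three samples, or the last probe sent more than a second ago. A rough user-space rendering of that decision follows; the struct and function names are stand-ins, and a monotonic clock stands in for ktime_t.

/*
 * Sketch (not kernel code) of the RTT-probe decision visible above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct peer_rtt {
        unsigned int rtt_count;        /* RTT samples gathered so far */
        struct timespec rtt_last_req;  /* when the last probe was sent */
};

static int64_t ms_since(const struct timespec *then)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (int64_t)(now.tv_sec - then->tv_sec) * 1000 +
               (now.tv_nsec - then->tv_nsec) / 1000000;
}

static bool want_ping(const struct peer_rtt *peer)
{
        return peer->rtt_count < 3 || ms_since(&peer->rtt_last_req) > 1000;
}

int main(void)
{
        struct peer_rtt peer = { .rtt_count = 0 };

        clock_gettime(CLOCK_MONOTONIC, &peer.rtt_last_req);
        return want_ping(&peer) ? 0 : 1;   /* 0: a ping would be proposed */
}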
256 * Allocate a new incoming call from the prealloc pool, along with a connection
268 struct rxrpc_call *call; in rxrpc_alloc_incoming_call() local
317 /* And now we can allocate and set up a new call */ in rxrpc_alloc_incoming_call()
318 call = b->call_backlog[call_tail]; in rxrpc_alloc_incoming_call()
323 rxrpc_see_call(call); in rxrpc_alloc_incoming_call()
324 call->conn = conn; in rxrpc_alloc_incoming_call()
325 call->security = conn->security; in rxrpc_alloc_incoming_call()
326 call->security_ix = conn->security_ix; in rxrpc_alloc_incoming_call()
327 call->peer = rxrpc_get_peer(conn->params.peer); in rxrpc_alloc_incoming_call()
328 call->cong_cwnd = call->peer->cong_cwnd; in rxrpc_alloc_incoming_call()
329 return call; in rxrpc_alloc_incoming_call()
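This is the consumer side of the same backlog ring sketched earlier: if head and tail are equal the pool is empty and the incoming call has to be turned away, otherwise the entry at the tail is taken and the tail advanced. The toy types are re-declared so the sketch stands alone; none of the names are the kernel's.

/*
 * Companion sketch to the producer above (not kernel code).
 */
#include <stddef.h>

#define BACKLOG_MAX 32                      /* must match the producer */

struct backlog {
        void *slot[BACKLOG_MAX];
        unsigned int head;                  /* producer index */
        unsigned int tail;                  /* consumer index */
};

/* Take one preallocated entry, or NULL if the pool is exhausted. */
static void *take_one(struct backlog *b)
{
        unsigned int head = b->head, tail = b->tail;
        void *entry;

        if (head == tail)                   /* nothing preallocated */
                return NULL;

        entry = b->slot[tail & (BACKLOG_MAX - 1)];
        b->tail = tail + 1;                 /* hand the slot back to the producer */
        return entry;
}

int main(void)
{
        struct backlog b = { .head = 0, .tail = 0 };
        int token;

        b.slot[b.head++ & (BACKLOG_MAX - 1)] = &token;  /* producer fills one slot */
        return take_one(&b) == &token ? 0 : 1;          /* consumer drains it */
}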
333 * Set up a new incoming call. Called in BH context with the RCU read lock
336 * If this is for a kernel service, when we allocate the call, it will have
345 * The call is returned with the user access mutex held.
355 struct rxrpc_call *call = NULL; in rxrpc_new_incoming_call() local
370 /* The peer, connection and call may all have sprung into existence due in rxrpc_new_incoming_call()
380 call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb); in rxrpc_new_incoming_call()
382 if (!call) { in rxrpc_new_incoming_call()
387 trace_rxrpc_receive(call, rxrpc_receive_incoming, in rxrpc_new_incoming_call()
390 /* Make the call live. */ in rxrpc_new_incoming_call()
391 rxrpc_incoming_call(rx, call, skb); in rxrpc_new_incoming_call()
392 conn = call->conn; in rxrpc_new_incoming_call()
395 rx->notify_new_call(&rx->sk, call, call->user_call_ID); in rxrpc_new_incoming_call()
401 set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events); in rxrpc_new_incoming_call()
402 rxrpc_queue_conn(call->conn); in rxrpc_new_incoming_call()
406 write_lock(&call->state_lock); in rxrpc_new_incoming_call()
407 if (call->state < RXRPC_CALL_COMPLETE) in rxrpc_new_incoming_call()
408 call->state = RXRPC_CALL_SERVER_RECV_REQUEST; in rxrpc_new_incoming_call()
409 write_unlock(&call->state_lock); in rxrpc_new_incoming_call()
413 rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, in rxrpc_new_incoming_call()
417 rxrpc_abort_call("CON", call, sp->hdr.seq, in rxrpc_new_incoming_call()
426 rxrpc_send_ping(call, skb); in rxrpc_new_incoming_call()
431 * service to prevent the call from being deallocated too early. in rxrpc_new_incoming_call()
433 rxrpc_put_call(call, rxrpc_call_put); in rxrpc_new_incoming_call()
435 _leave(" = %p{%d}", call, call->debug_id); in rxrpc_new_incoming_call()
436 return call; in rxrpc_new_incoming_call()
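The final state change above is made under call->state_lock and only if the call has not already reached a terminal state, so an abort or security failure that won the race is not overwritten. Below is a stand-alone sketch of that guarded transition, using a pthread rwlock and invented enum values in place of the kernel's.

/*
 * Sketch (not kernel code): only advance a call to "server receiving
 * request" if nothing has already completed it.
 */
#include <pthread.h>

enum call_state {
        CALL_SERVER_PREALLOC,
        CALL_SERVER_RECV_REQUEST,
        CALL_COMPLETE,                      /* terminal: must never be overwritten */
};

struct call {
        pthread_rwlock_t state_lock;
        enum call_state state;
};

static void call_begin_service(struct call *call)
{
        pthread_rwlock_wrlock(&call->state_lock);
        if (call->state < CALL_COMPLETE)    /* don't resurrect a finished call */
                call->state = CALL_SERVER_RECV_REQUEST;
        pthread_rwlock_unlock(&call->state_lock);
}

int main(void)
{
        struct call call = { .state = CALL_SERVER_PREALLOC };

        pthread_rwlock_init(&call.state_lock, NULL);
        call_begin_service(&call);          /* advances to RECV_REQUEST */
        call.state = CALL_COMPLETE;
        call_begin_service(&call);          /* no effect: already complete */
        pthread_rwlock_destroy(&call.state_lock);
        return call.state == CALL_COMPLETE ? 0 : 1;
}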
445 * Charge up socket with preallocated calls, attaching user call IDs.
462 * @notify_rx: Event notification function for the call
463 * @user_attach_call: Func to attach call to user_call_ID
464 * @user_call_ID: The tag to attach to the preallocated call
470 * The user is given a ref to hold on the call.
472 * Note that the call may become connected before this function returns.
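The kernel-doc lines above appear to belong to the charge function a kernel service uses to keep the accept pool topped up (judging by the parameter names, rxrpc_kernel_charge_accept()). A service typically charges repeatedly, pairing each slot with a per-call context, until the pool refuses another entry. The sketch below shows only that loop shape; charge_accept(), struct svc_call and POOL_MAX are stand-ins invented for the example, not the real kernel API.

/*
 * Sketch (not kernel code) of the charge-until-full loop a service might
 * run after creating its listening socket.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define POOL_MAX 4                          /* toy pool size */

struct svc_call { int id; };                /* service's per-call context */

static int pool_used;

/* Stand-in for the charge call: consumes one slot, or fails when full. */
static int charge_accept(struct svc_call *ctx, unsigned long user_call_id)
{
        if (pool_used >= POOL_MAX)
                return -ENOBUFS;
        ctx->id = ++pool_used;
        printf("charged slot %d, tag %#lx\n", ctx->id, user_call_id);
        return 0;
}

int main(void)
{
        for (;;) {
                struct svc_call *ctx = calloc(1, sizeof(*ctx));

                if (!ctx)
                        return 1;
                /* The context's address doubles as the user call ID tag.
                 * Charged contexts stay alive: the pool now owns them. */
                if (charge_accept(ctx, (unsigned long)ctx) < 0) {
                        free(ctx);          /* pool full: stop charging */
                        break;
                }
        }
        return 0;
}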