Lines Matching +full:rpc +full:- +full:if
1 /* SPDX-License-Identifier: GPL-2.0 */
5 * RPC server declarations.
28 * RPC service thread pool.
31 * a single one of these per RPC service, but on NUMA machines those
33 * have one pool per NUMA node. This optimisation reduces cross-
34 * node traffic on multi-node NUMA NFS servers.
53 SP_TASK_PENDING, /* still work to do even if no xprt is queued */
60 * RPC service.
62 * An RPC service is a ``daemon,'' possibly multithreaded, which
63 * receives and processes incoming RPC messages.
67 * A single daemon can now host more than one RPC program; sv_programs points to them.
70 struct svc_program * sv_programs; /* RPC programs */
71 struct svc_stat * sv_stats; /* RPC statistics */
90 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
107 * Maximum payload size supported by a kernel RPC server.
115 * For UDP transports, a block plus NFS, RPC, and UDP headers
134 * RPC requests and replies are stored in one or more pages.
139 * Pages are sent using ->sendmsg with MSG_SPLICE_PAGES so each server thread
151 * This assumes that the non-page part of an rpc reply will fit
152 * in a page - NFSd ensures this. lockd also has no trouble.
156 * When we use ->sendfile to return read data, we might need one extra page
157 * if the request is not page-aligned. So add another '1'.
159 #define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
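The continuation of the define is not captured above, but the visible arithmetic rounds the maximum payload up to whole pages before the extra pages described in the comment are added. A minimal user-space sketch of the same round-up, assuming a 1 MiB maximum payload and 4 KiB pages (both values are assumptions for illustration, not taken from this header):

#include <stdio.h>

/* Assumed values for illustration; the kernel uses RPCSVC_MAXPAYLOAD
 * and the architecture's PAGE_SIZE. */
#define EX_MAXPAYLOAD	(1024 * 1024u)
#define EX_PAGE_SIZE	4096u

int main(void)
{
	/* Round the payload up to whole pages, as the macro does... */
	unsigned int pages = (EX_MAXPAYLOAD + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE;

	/* ...then allow one extra page for non-page-aligned data. */
	printf("%u payload pages, %u with the alignment page\n",
	       pages, pages + 1);
	return 0;
}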
175 * - reply from here */
178 struct svc_serv * rq_server; /* RPC service definition */
212 void * rq_auth_data; /* flavor-specific data */
227 struct auth_domain * rq_client; /* RPC peer info */
228 struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
241 unsigned int rq_status_counter; /* RPC processing counter */
254 #define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
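SVC_NET() needs the ternary because backchannel requests carry no rq_xprt; they record their network namespace in rq_bc_net instead. A kernel-style sketch of a caller (the function itself is invented for illustration):

#include <linux/printk.h>
#include <linux/sunrpc/svc.h>

/* Illustrative only: pick the request's network namespace whether it
 * arrived on a normal transport or on the backchannel. */
static void example_note_net(const struct svc_rqst *rqstp)
{
	struct net *net = SVC_NET(rqstp);

	pr_debug("request belongs to net %p\n", net);
}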
261 return (struct sockaddr_in *) &rqst->rq_addr; in svc_addr_in()
266 return (struct sockaddr_in6 *) &rqst->rq_addr; in svc_addr_in6()
271 return (struct sockaddr *) &rqst->rq_addr; in svc_addr()
276 return (struct sockaddr_in *) &rqst->rq_daddr; in svc_daddr_in()
281 return (struct sockaddr_in6 *) &rqst->rq_daddr; in svc_daddr_in6()
286 return (struct sockaddr *) &rqst->rq_daddr; in svc_daddr()
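All six helpers are plain casts over the request's sockaddr_storage fields: rq_addr holds the remote peer, rq_daddr the local destination. A sketch of family-based dispatch using them (the helper name is invented):

#include <linux/sunrpc/svc.h>

/* Illustrative: return the peer's port, handling both address
 * families the casts above can produce. */
static unsigned short example_peer_port(const struct svc_rqst *rqst)
{
	switch (svc_addr(rqst)->sa_family) {
	case AF_INET:
		return ntohs(svc_addr_in(rqst)->sin_port);
	case AF_INET6:
		return ntohs(svc_addr_in6(rqst)->sin6_port);
	default:
		return 0;
	}
}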
290 * svc_thread_should_stop - check if this thread should stop
305 if (test_and_clear_bit(SP_NEED_VICTIM, &rqstp->rq_pool->sp_flags)) in svc_thread_should_stop()
306 set_bit(RQ_VICTIM, &rqstp->rq_flags); in svc_thread_should_stop()
308 return test_bit(RQ_VICTIM, &rqstp->rq_flags); in svc_thread_should_stop()
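The test_and_clear_bit()/set_bit() pair means exactly one thread claims each SP_NEED_VICTIM request and remembers the claim in RQ_VICTIM, so repeated calls keep returning true for that thread. A sketch of the loop shape this supports, modelled on how sunrpc service threads are typically written (the details are assumptions, not copied from a real daemon):

static int example_svc_thread(void *data)
{
	struct svc_rqst *rqstp = data;

	/* Run until this thread is picked as the victim of a
	 * pool-shrink request. */
	while (!svc_thread_should_stop(rqstp))
		svc_recv(rqstp);	/* wait for and handle one request */

	svc_exit_thread(rqstp);
	return 0;
}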
312 * svc_thread_init_status - report whether thread has initialised successfully
320 * If zero is passed, the thread is ready and must continue until
321 * svc_thread_should_stop() returns true. If a non-zero error is passed,
322 * the call will not return - the thread will exit.
326 store_release_wake_up(&rqstp->rq_err, err); in svc_thread_init_status()
327 if (err) in svc_thread_init_status()
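store_release_wake_up() publishes the status to the parent waiting on rq_err and orders the store ahead of the wakeup. A sketch of the initialisation portion of the thread function sketched above (example_setup() is a hypothetical per-thread setup step):

static int example_svc_thread(void *data)
{
	struct svc_rqst *rqstp = data;
	int err;

	/* example_setup() is hypothetical per-thread initialisation. */
	err = example_setup(rqstp);

	/* Wakes the parent; on a non-zero err this call never returns
	 * and the thread exits. */
	svc_thread_init_status(rqstp, err);

	/* ... request loop as sketched earlier ... */
	return 0;
}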
355 * RPC program - an array of these can use the same transport endpoint
377 * RPC program version
382 const struct svc_procedure *vs_proc; /* per-procedure info */
389 /* Don't care if the rpcbind registration fails */
400 * RPC procedure info
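Services describe each procedure with one of these. A hedged sketch of a one-entry table, with every name and size invented (the field names follow this header's struct svc_procedure):

/* Hypothetical handler prototypes and argument/result types used in
 * the sketches below. */
static __be32 example_proc_null(struct svc_rqst *rqstp);
static bool example_proc_decode(struct svc_rqst *rqstp, struct xdr_stream *xdr);
static bool example_proc_encode(struct svc_rqst *rqstp, struct xdr_stream *xdr);
struct example_args { u32 value; };
struct example_res { u32 count; };

/* Illustrative procedure table; a real service (nfsd, lockd) fills in
 * one entry per RPC procedure number. */
static const struct svc_procedure example_procedures[] = {
	[0] = {
		.pc_func	= example_proc_null,
		.pc_decode	= example_proc_decode,
		.pc_encode	= example_proc_encode,
		.pc_argsize	= sizeof(struct example_args),
		.pc_ressize	= sizeof(struct example_res),
		.pc_name	= "NULL",
	},
};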
480 svc_reserve(rqstp, space + rqstp->rq_auth_slack); in svc_reserve_auth()
484 * svcxdr_init_decode - Prepare an xdr_stream for Call decoding
485 * @rqstp: controlling server RPC transaction context
490 struct xdr_stream *xdr = &rqstp->rq_arg_stream; in svcxdr_init_decode()
491 struct xdr_buf *buf = &rqstp->rq_arg; in svcxdr_init_decode()
492 struct kvec *argv = buf->head; in svcxdr_init_decode()
494 WARN_ON(buf->len != buf->head->iov_len + buf->page_len + buf->tail->iov_len); in svcxdr_init_decode()
495 buf->len = buf->head->iov_len + buf->page_len + buf->tail->iov_len; in svcxdr_init_decode()
497 xdr_init_decode(xdr, buf, argv->iov_base, NULL); in svcxdr_init_decode()
498 xdr_set_scratch_page(xdr, rqstp->rq_scratch_page); in svcxdr_init_decode()
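Once this has run, a procedure's pc_decode callback can pull arguments straight from rq_arg_stream; the scratch page lets decoders linearise items that straddle page boundaries. A sketch of such a decoder, implementing the pc_decode entry from the table sketch above:

/* Illustrative pc_decode implementation: one 32-bit argument. */
static bool example_proc_decode(struct svc_rqst *rqstp,
				struct xdr_stream *xdr)
{
	struct example_args *argp = rqstp->rq_argp;

	return xdr_stream_decode_u32(xdr, &argp->value) == 0;
}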
502 * svcxdr_init_encode - Prepare an xdr_stream for svc Reply encoding
503 * @rqstp: controlling server RPC transaction context
508 struct xdr_stream *xdr = &rqstp->rq_res_stream; in svcxdr_init_encode()
509 struct xdr_buf *buf = &rqstp->rq_res; in svcxdr_init_encode()
510 struct kvec *resv = buf->head; in svcxdr_init_encode()
514 xdr->buf = buf; in svcxdr_init_encode()
515 xdr->iov = resv; in svcxdr_init_encode()
516 xdr->p = resv->iov_base + resv->iov_len; in svcxdr_init_encode()
517 xdr->end = resv->iov_base + PAGE_SIZE; in svcxdr_init_encode()
518 buf->len = resv->iov_len; in svcxdr_init_encode()
519 xdr->page_ptr = buf->pages - 1; in svcxdr_init_encode()
520 buf->buflen = PAGE_SIZE * (rqstp->rq_page_end - buf->pages); in svcxdr_init_encode()
521 xdr->rqst = NULL; in svcxdr_init_encode()
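Encoding then starts immediately after whatever the RPC layer has already written into the head kvec, with xdr->end clamped to the first page and buf->buflen sized from the remaining reply pages. A matching pc_encode sketch, again using the invented types from the table sketch:

/* Illustrative pc_encode implementation: one 32-bit result. */
static bool example_proc_encode(struct svc_rqst *rqstp,
				struct xdr_stream *xdr)
{
	struct example_res *resp = rqstp->rq_resp;

	return xdr_stream_encode_u32(xdr, resp->count) > 0;
}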
525 * svcxdr_encode_opaque_pages - Insert pages into an xdr_stream
542 xdr->page_ptr = rqstp->rq_next_page - 1; in svcxdr_encode_opaque_pages()
546 * svcxdr_set_auth_slack - Reserve reply buffer space for the auth flavor
547 * @rqstp: RPC transaction
555 struct xdr_stream *xdr = &rqstp->rq_res_stream; in svcxdr_set_auth_slack()
556 struct xdr_buf *buf = &rqstp->rq_res; in svcxdr_set_auth_slack()
557 struct kvec *resv = buf->head; in svcxdr_set_auth_slack()
559 rqstp->rq_auth_slack = slack; in svcxdr_set_auth_slack()
561 xdr->end -= XDR_QUADLEN(slack); in svcxdr_set_auth_slack()
562 buf->buflen -= rqstp->rq_auth_slack; in svcxdr_set_auth_slack()
564 WARN_ON(xdr->iov != resv); in svcxdr_set_auth_slack()
565 WARN_ON(xdr->p > xdr->end); in svcxdr_set_auth_slack()
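A security flavor that will later wrap the reply calls this before the procedure encodes anything, so the reserved bytes can never be consumed by the reply body. A hedged sketch, assuming the conventional RPC_MAX_AUTH_SIZE bound from the sunrpc auth headers:

/* Illustrative: called from a security flavor's accept path. */
static void example_flavor_accept(struct svc_rqst *rqstp)
{
	/* Reserve worst-case room for the verifier this flavor will
	 * append when it wraps the reply. */
	svcxdr_set_auth_slack(rqstp, RPC_MAX_AUTH_SIZE);
}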
569 * svcxdr_set_accept_stat - Reserve space for the accept_stat field
570 * @rqstp: RPC transaction context
578 struct xdr_stream *xdr = &rqstp->rq_res_stream; in svcxdr_set_accept_stat()
580 rqstp->rq_accept_statp = xdr_reserve_space(xdr, XDR_UNIT); in svcxdr_set_accept_stat()
581 if (unlikely(!rqstp->rq_accept_statp)) in svcxdr_set_accept_stat()
583 *rqstp->rq_accept_statp = rpc_success; in svcxdr_set_accept_stat()
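Because rq_accept_statp keeps a pointer to the reserved XDR word, later code can rewrite the accept_stat in place after dispatch. A sketch (the situation is invented; the constant is the standard one from the sunrpc xdr headers):

/* Illustrative: flip the already-reserved accept_stat if dispatch
 * discovered the procedure is unavailable. */
static void example_fail_proc(struct svc_rqst *rqstp)
{
	*rqstp->rq_accept_statp = rpc_proc_unavail;
}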