/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv);

#define svc_serv_is_pooled(serv)    ((serv)->sv_function)

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.count = 0,
	.mode = SVC_POOL_DEFAULT
};
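
/*
 * Illustration (an editorial example, not from the original source):
 * on a hypothetical machine with two NUMA nodes that both have cpus,
 * SVC_POOL_PERNODE builds npools = 2 with
 *
 *	to_pool[0] = 0, to_pool[1] = 1		(node -> pool id)
 *	pool_to[0] = 0, pool_to[1] = 1		(pool id -> node)
 *
 * while SVC_POOL_PERCPU indexes the same arrays by cpu instead of
 * node.  SVC_POOL_GLOBAL allocates neither array.
 */
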
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip) {
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode", 20);
	default:
		return sprintf(buf, "%d", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		 &svc_pool_map.mode, 0644);
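
/*
 * Usage sketch (an assumption about the standard module-parameter
 * plumbing, not code in this file): the mode can be chosen at module
 * load time, e.g. "modprobe sunrpc pool_mode=pernode", or, because
 * the parameter is registered with mode 0644, changed at runtime
 * while no pooled service holds a reference to the map:
 *
 *	# echo pernode > /sys/module/sunrpc/parameters/pool_mode
 *
 * param_set_pool_mode() above returns -EBUSY if any svc_serv is
 * still using the map.
 */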

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
	m->to_pool = NULL;
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}


/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}


/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}


/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}


static int svc_pool_map_get_node(unsigned int pidx)
{
	const struct svc_pool_map *m = &svc_pool_map;

	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
	return NUMA_NO_NODE;
}
/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	BUG_ON(m->count == 0);

	switch (m->mode) {
	case SVC_POOL_PERCPU:
	{
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	}
	case SVC_POOL_PERNODE:
	{
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
	}
}

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
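
/*
 * Usage sketch (an illustration; the call site lives in the transport
 * code, not in this file): code that has just received data picks the
 * pool for the current cpu before queueing the transport, roughly:
 *
 *	struct svc_pool *pool = svc_pool_for_cpu(xprt->xpt_server,
 *						 get_cpu());
 *	...queue xprt on pool->sp_sockets, wake a pool thread...
 *	put_cpu();
 *
 * The "pidx % serv->sv_nrpools" above guarantees a valid pool even if
 * the map and the serv disagree about the number of pools.
 */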

static int svc_rpcb_setup(struct svc_serv *serv)
{
	int err;

	err = rpcb_create_local();
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv);
	return 0;
}

void svc_rpcb_cleanup(struct svc_serv *serv)
{
	svc_unregister(serv);
	rpcb_put_local();
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);

static int svc_uses_rpcbind(struct svc_serv *serv)
{
	struct svc_program	*progp;
	unsigned int		i;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden == 0)
				return 1;
		}
	}

	return 0;
}

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     void (*shutdown)(struct svc_serv *serv))
{
	struct svc_serv	*serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	if (svc_uses_rpcbind(serv)) {
		if (svc_rpcb_setup(serv) < 0) {
			kfree(serv->sv_pools);
			kfree(serv);
			return NULL;
		}
		if (!serv->sv_shutdown)
			serv->sv_shutdown = svc_rpcb_cleanup;
	}

	return serv;
}

struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
EXPORT_SYMBOL_GPL(svc_create);

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  void (*shutdown)(struct svc_serv *serv),
		  svc_thread_fn func, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, shutdown);

	if (serv != NULL) {
		serv->sv_function = func;
		serv->sv_module = mod;
	}

	return serv;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
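
/*
 * Usage sketch (hedged: the argument values are made up for
 * illustration, and "my_program"/"my_thread_fn" are hypothetical;
 * real callers are services such as nfsd and lockd):
 *
 *	serv = svc_create_pooled(&my_program, 64 * 1024,
 *				 NULL, my_thread_fn, THIS_MODULE);
 *	if (serv == NULL)
 *		return -ENOMEM;
 *	error = svc_set_num_threads(serv, NULL, nrservs);
 *
 * svc_create() is the single-pool variant for services that never
 * spread their threads across pools.
 */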

/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
				serv->sv_program->pg_name,
				serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);
	/*
	 * The set of xprts (contained in the sv_tempsocks and
	 * sv_permsocks lists) is now constant, since it is modified
	 * only by accepting new sockets (done by service threads in
	 * svc_recv) or aging old ones (done by sv_temptimer), or
	 * configuration changes (excluded by whatever locking the
	 * caller is using--nfsd_mutex in the case of nfsd).  So it's
	 * safe to traverse those lists and shut everything down:
	 */
	svc_close_all(serv);

	if (serv->sv_shutdown)
		serv->sv_shutdown(serv);

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
	unsigned int pages, arghi;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return 1;

	pages = size / PAGE_SIZE + 1; /* extra page: the buffer holds both
				       * the request and the reply, and we
				       * assume that one of the two fits in
				       * a single page.
				       */
	arghi = 0;
	BUG_ON(pages > RPCSVC_MAXPAGES);
	while (pages) {
		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}
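
/*
 * Worked example (illustrative numbers, not from the source): with
 * PAGE_SIZE = 4096 and a service created with bufsize = 32768,
 * __svc_create() sets sv_max_payload = 32768 and
 * sv_max_mesg = roundup(32768 + 4096, 4096) = 36864.  Then
 * size = 36864 here gives pages = 36864/4096 + 1 = 10, so ten pages
 * are allocated into rq_pages[] for the combined request/reply buffer.
 */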

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}

struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst	*rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
		goto out_enomem;

	init_waitqueue_head(&rqstp->rq_wait);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_thread;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_thread;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
		goto out_thread;

	return rqstp;
out_thread:
	svc_exit_thread(rqstp);
out_enomem:
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}

/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Caller must ensure mutual exclusion between this and
 * server startup or shutdown.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst	*rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	int error = 0;
	unsigned int state = serv->sv_nrthreads-1;
	int node;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		node = svc_pool_map_get_node(chosen_pool->sp_id);
		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp)) {
			error = PTR_ERR(rqstp);
			break;
		}

		__module_get(serv->sv_module);
		task = kthread_create_on_node(serv->sv_function, rqstp,
					      node, serv->sv_name);
		if (IS_ERR(task)) {
			error = PTR_ERR(task);
			module_put(serv->sv_module);
			svc_exit_thread(rqstp);
			break;
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (task = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(SIGINT, task, 1);
		nrservs++;
	}

	return error;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);
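
/*
 * Usage sketch (hedged: modelled on the comments above, not on code in
 * this file, and assuming the svc_get() helper from svc.h).  A caller
 * resizing all pools first takes a reference so that the "-1"
 * accounting works, then drops it again:
 *
 *	svc_get(serv);
 *	error = svc_set_num_threads(serv, NULL, nrservs);
 *	svc_destroy(serv);
 *
 * Passing &serv->sv_pools[i] instead of NULL resizes only that pool,
 * and the whole sequence must run under the service's own mutex
 * (nfsd_mutex in the case of nfsd).
 */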

/*
 * Called from a server thread as it's exiting. Caller must hold the BKL or
 * the "service mutex", whichever is appropriate for the service.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);

/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(const u32 program, const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(program, version,
					(const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(program, version, protocol, port);

	return error;
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(const u32 program, const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(program, version,
					(const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* IS_ENABLED(CONFIG_IPV6) */

/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(program, version,
						protocol, port);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		error = __svc_rpcb_register6(program, version,
						protocol, port);
		break;
#endif
	}

	if (error < 0)
		printk(KERN_WARNING "svc: failed to register %sv%u RPC "
			"service (errno %d).\n", progname, version, -error);
	return error;
}

/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, const int family,
		 const unsigned short proto, const unsigned short port)
{
	struct svc_program	*progp;
	unsigned int		i;
	int			error = 0;

	BUG_ON(proto == 0 && port == 0);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;

			dprintk("svc: svc_register(%sv%d, %s, %u, %u)%s\n",
					progp->pg_name,
					i,
					proto == IPPROTO_UDP ? "udp" : "tcp",
					port,
					family,
					progp->pg_vers[i]->vs_hidden ?
						" (but not telling portmap)" : "");

			if (progp->pg_vers[i]->vs_hidden)
				continue;

			error = __svc_register(progp->pg_name, progp->pg_prog,
						i, family, proto, port);
			if (error < 0)
				break;
		}
	}

	return error;
}
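
/*
 * Usage sketch (an assumption about the call site, which lives in the
 * transport setup code rather than in this file): when a listener is
 * created for a service, something along these lines advertises it,
 * here with the well-known NFS port purely as an example:
 *
 *	error = svc_register(serv, PF_INET, IPPROTO_TCP, 2049);
 *
 * This walks every non-hidden program/version pair on "serv" and
 * issues the rpcbind SET requests via __svc_register() above.
 */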

/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version].  If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway.  So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(program, version, 0, 0);

	dprintk("svc: %s(%sv%u), error %d\n",
			__func__, progname, version, error);
}

/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	/*
	 * Temporarily hide any pending signal so the rpcbind upcalls
	 * below are not interrupted; the pending state is recalculated
	 * once we're done.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;

			dprintk("svc: attempting to unregister %sv%u\n",
				progp->pg_name, i);
			__svc_unregister(progp->pg_prog, i, progp->pg_name);
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * Printk the given error with the address of the client that caused it.
 */
static __printf(2, 3)
int svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	va_list args;
	int	r;
	char	buf[RPC_MAX_ADDRBUFLEN];

	if (!net_ratelimit())
		return 0;

	printk(KERN_WARNING "svc: %s: ",
		svc_print_addr(rqstp, buf, sizeof(buf)));

	va_start(args, fmt);
	r = vprintk(fmt, args);
	va_end(args);

	return r;
}

/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
	struct svc_program	*progp;
	struct svc_version	*versp = NULL;	/* compiler food */
	struct svc_procedure	*procp = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	kxdrproc_t		xdr;
	__be32			*statp;
	u32			prog, vers, proc;
	__be32			auth_stat, rpc_stat;
	int			auth_res;
	__be32			*reply_statp;

	rpc_stat = rpc_success;

	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* Will be turned off only in gss privacy case: */
	rqstp->rq_splice_ok = 1;
	/* Will be turned off only when NFSv4 Sessions are used */
	rqstp->rq_usedeferral = 1;
	rqstp->rq_dropme = false;

	/* Setup reply header */
	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);

	svc_putu32(resv, rqstp->rq_xid);

	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_CLOSE:
		if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
			svc_close_xprt(rqstp->rq_xprt);
		/* fall through: a closed request is also dropped */
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	if (vers >= progp->pg_nvers ||
	  !(versp = progp->pg_vers[vers]))
		goto err_bad_vers;

	procp = versp->vs_proc + proc;
	if (proc >= versp->vs_nproc || !procp->pc_func)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;

	/* Build the reply header. */
	statp = resv->iov_base + resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* Bump per-procedure stats counter */
	procp->pc_count++;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);

	/* Call the function that processes the request. */
	if (!versp->vs_dispatch) {
		/* Decode arguments */
		xdr = procp->pc_decode;
		if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
			goto err_garbage;

		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

		/* Encode reply */
		if (rqstp->rq_dropme) {
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
		if (*statp == rpc_success &&
		    (xdr = procp->pc_encode) &&
		    !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
			dprintk("svc: failed to encode reply\n");
			/* serv->sv_stats->rpcsystemerr++; */
			*statp = rpc_system_err;
		}
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!versp->vs_dispatch(rqstp, statp)) {
			/* Release reply info */
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
	}

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void *)statp) - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp, NULL, rqstp->rq_resp);

	if (procp->pc_encode == NULL)
		goto dropit;

 sendit:
	if (svc_authorise(rqstp))
		goto dropit;
	return 1;		/* Caller can now send it */

 dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %zd, dropping request\n",
			argv->iov_len);

	goto dropit;			/* drop request */

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		       vers, prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, progp->pg_lovers);
	svc_putnl(resv, progp->pg_hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	svc_printk(rqstp, "failed to decode args\n");

	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}

/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct kvec		*argv = &rqstp->rq_arg.head[0];
	struct kvec		*resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	u32			dir;

	/*
	 * Setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_resused = 1;
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;

	rqstp->rq_xid = svc_getu32(argv);

	dir = svc_getnl(argv);
	if (dir != 0) {
		/* direction != CALL */
		svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
		serv->sv_stats->rpcbadfmt++;
		svc_drop(rqstp);
		return 0;
	}

	/* Returns 1 for send, 0 for drop */
	if (svc_process_common(rqstp, argv, resv))
		return svc_send(rqstp);
	else {
		svc_drop(rqstp);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(svc_process);

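/*
 * Usage sketch (an illustration of the expected calling pattern; the
 * loop is a simplification of what real service threads, such as the
 * function registered via svc_create_pooled(), actually do):
 *
 *	while (!kthread_should_stop()) {
 *		err = svc_recv(rqstp, timeout);
 *		if (err == -EAGAIN || err == -EINTR)
 *			continue;
 *		if (err < 0)
 *			break;
 *		svc_process(rqstp);
 *	}
 *
 * svc_process() itself sends the reply (via svc_send()) or drops the
 * request, so the loop only alternates receive and process.
 */
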
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Process a backchannel RPC request that arrived over an existing
 * outbound connection
 */
int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
	       struct svc_rqst *rqstp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xprt = serv->sv_bc_xprt;
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_server = serv;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* reset result send buffer "put" position */
	resv->iov_len = 0;

	if (rqstp->rq_prot != IPPROTO_TCP) {
		printk(KERN_ERR "No support for Non-TCP transports!\n");
		BUG();
	}

	/*
	 * Skip the next two words because they've already been
	 * processed in the transport
	 */
	svc_getu32(argv);	/* XID */
	svc_getnl(argv);	/* CALLDIR */

	/* Returns 1 for send, 0 for drop */
	if (svc_process_common(rqstp, argv, resv)) {
		memcpy(&req->rq_snd_buf, &rqstp->rq_res,
						sizeof(req->rq_snd_buf));
		return bc_send(req);
	} else {
		/* Nothing to do to drop request */
		return 0;
	}
}
EXPORT_SYMBOL_GPL(bc_svc_process);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);