// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "fail.h"

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv, struct net *net);

#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
};

static struct svc_pool_map svc_pool_map = {
	.mode = SVC_POOL_DEFAULT
};

static DEFINE_MUTEX(svc_pool_map_mutex);	/* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_get_pool_mode(char *buf, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip)
	{
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto\n", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global\n", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu\n", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode\n", 20);
	default:
		return sprintf(buf, "%d\n", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		  &svc_pool_map.mode, 0644);

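/*
 * Because the parameter is registered with mode 0644 it is also visible
 * via sysfs (assuming the usual module-param path for the sunrpc module),
 * so an administrator can inspect or change the mapping mode at runtime:
 *
 *	# cat /sys/module/sunrpc/parameters/pool_mode
 *	auto
 *	# echo pernode > /sys/module/sunrpc/parameters/pool_mode
 *
 * The write fails with EBUSY while any pooled service still holds a
 * reference on the map (see the m->count check in param_set_pool_mode()).
 */
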
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
	m->to_pool = NULL;
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx >= maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}

/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}

/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa) if pools are in use.
 * Initialise the map if we're the first user.
 * Returns the number of pools. If this is '1', no reference
 * was taken.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		WARN_ON_ONCE(m->npools <= 1);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools <= 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	if (npools == 1)
		/* service is unpooled, so doesn't hold a reference */
		m->count--;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}

/*
 * Drop a reference to the global map of cpus to pools, if
 * pools were in use, i.e. if npools > 1.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(int npools)
{
	struct svc_pool_map *m = &svc_pool_map;

	if (npools <= 1)
		return;
	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}

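/*
 * A minimal sketch of the expected get/put pairing, as used by
 * svc_create_pooled() and svc_destroy() below:
 *
 *	unsigned int npools = svc_pool_map_get();
 *	serv = __svc_create(prog, bufsize, npools, ops);
 *	...
 *	svc_pool_map_put(npools);	(on failure, or at final destruction)
 */
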
static int svc_pool_map_get_node(unsigned int pidx)
{
	const struct svc_pool_map *m = &svc_pool_map;

	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
	return NUMA_NO_NODE;
}

/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	WARN_ON_ONCE(m->count == 0);
	if (m->count == 0)
		return;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	case SVC_POOL_PERNODE:
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
}

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	if (serv->sv_nrpools <= 1)
		return serv->sv_pools;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		pidx = m->to_pool[cpu];
		break;
	case SVC_POOL_PERNODE:
		pidx = m->to_pool[cpu_to_node(cpu)];
		break;
	}

	return &serv->sv_pools[pidx % serv->sv_nrpools];
}

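/*
 * Worked example: in SVC_POOL_PERCPU mode with cpus 0-3 all online, an
 * RPC arriving on cpu 2 is enqueued to pool 2 (to_pool[2] == 2); in
 * SVC_POOL_PERNODE mode every cpu on node 0 maps to pool 0.  The final
 * modulo is defensive only, since pidx always comes from the map.
 */
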
int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
	int err;

	err = rpcb_create_local(net);
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv, net);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_rpcb_setup);

void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
	svc_unregister(serv, net);
	rpcb_put_local(net);
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);

static int svc_uses_rpcbind(struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned int i;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (!progp->pg_vers[i]->vs_hidden)
				return 1;
		}
	}

	return 0;
}

int svc_bind(struct svc_serv *serv, struct net *net)
{
	if (!svc_uses_rpcbind(serv))
		return 0;
	return svc_rpcb_setup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_bind);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void
__svc_init_bc(struct svc_serv *serv)
{
	INIT_LIST_HEAD(&serv->sv_cb_list);
	spin_lock_init(&serv->sv_cb_lock);
	init_waitqueue_head(&serv->sv_cb_waitq);
}
#else
static void
__svc_init_bc(struct svc_serv *serv)
{
}
#endif

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     const struct svc_serv_ops *ops)
{
	struct svc_serv	*serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	kref_init(&serv->sv_refcnt);
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_ops = ops;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	timer_setup(&serv->sv_temptimer, NULL, 0);
	spin_lock_init(&serv->sv_lock);

	__svc_init_bc(serv);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	return serv;
}

struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   const struct svc_serv_ops *ops)
{
	return __svc_create(prog, bufsize, /*npools*/1, ops);
}
EXPORT_SYMBOL_GPL(svc_create);

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  const struct svc_serv_ops *ops)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, ops);
	if (!serv)
		goto out_err;
	return serv;
out_err:
	svc_pool_map_put(npools);
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);

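/*
 * Sketch of typical use by a pooled service such as nfsd; the program,
 * ops table and thread count named here are illustrative, not defined in
 * this file:
 *
 *	serv = svc_create_pooled(&my_program, bufsize, &my_serv_ops);
 *	if (!serv)
 *		return -ENOMEM;
 *	error = svc_set_num_threads(serv, NULL, nthreads);
 *
 * The resulting serv has one pool per cpu or per NUMA node, according to
 * the pool_mode in effect when svc_pool_map_get() ran.
 */
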
void svc_shutdown_net(struct svc_serv *serv, struct net *net)
{
	svc_close_net(serv, net);

	if (serv->sv_ops->svo_shutdown)
		serv->sv_ops->svo_shutdown(serv, net);
}
EXPORT_SYMBOL_GPL(svc_shutdown_net);

/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct kref *ref)
{
	struct svc_serv *serv = container_of(ref, struct svc_serv, sv_refcnt);

	dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
	del_timer_sync(&serv->sv_temptimer);

	/*
	 * The last user is gone and thus all sockets have to be destroyed
	 * by this point. Check this.
	 */
	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	svc_pool_map_put(serv->sv_nrpools);

	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_pages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
	unsigned int pages, arghi;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return 1;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
				       * We assume one is at most one page
				       */
	arghi = 0;
	WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
	if (pages > RPCSVC_MAXPAGES)
		pages = RPCSVC_MAXPAGES;
	while (pages) {
		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}

struct svc_rqst *
svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst	*rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
		return rqstp;

	__set_bit(RQ_BUSY, &rqstp->rq_flags);
	spin_lock_init(&rqstp->rq_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_scratch_page = alloc_pages_node(node, GFP_KERNEL, 0);
	if (!rqstp->rq_scratch_page)
		goto out_enomem;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_enomem;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_enomem;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
		goto out_enomem;

	return rqstp;
out_enomem:
	svc_rqst_free(rqstp);
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_rqst_alloc);

static struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst	*rqstp;

	rqstp = svc_rqst_alloc(serv, pool, node);
	if (!rqstp)
		return ERR_PTR(-ENOMEM);

	svc_get(serv);
	spin_lock_bh(&serv->sv_lock);
	serv->sv_nrthreads += 1;
	spin_unlock_bh(&serv->sv_lock);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	return rqstp;
}

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		set_bit(RQ_VICTIM, &rqstp->rq_flags);
		list_del_rcu(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}

/* create new threads */
static int
svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst	*rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	unsigned int state = serv->sv_nrthreads-1;
	int node;

	do {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		node = svc_pool_map_get_node(chosen_pool->sp_id);
		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp))
			return PTR_ERR(rqstp);

		__module_get(serv->sv_ops->svo_module);
		task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
					      node, "%s", serv->sv_name);
		if (IS_ERR(task)) {
			module_put(serv->sv_ops->svo_module);
			svc_exit_thread(rqstp);
			return PTR_ERR(task);
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	} while (nrservs > 0);

	return 0;
}

/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Caller must ensure mutual exclusion between this and
 * server startup or shutdown.
 */

/* destroy old threads */
static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *task;
	unsigned int state = serv->sv_nrthreads-1;

	/* destroy old threads */
	do {
		task = choose_victim(serv, pool, &state);
		if (task == NULL)
			break;
		kthread_stop(task);
		nrservs++;
	} while (nrservs < 0);
	return 0;
}

int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	if (pool == NULL) {
		nrservs -= serv->sv_nrthreads;
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	if (nrservs > 0)
		return svc_start_kthreads(serv, pool, nrservs);
	if (nrservs < 0)
		return svc_stop_kthreads(serv, pool, nrservs);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);

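/*
 * Note that nrservs is an absolute target, converted internally into a
 * delta against the current thread count.  For example, with 8 threads
 * running across all pools:
 *
 *	svc_set_num_threads(serv, NULL, 8);	-> no change
 *	svc_set_num_threads(serv, NULL, 12);	-> starts 4 more threads
 *	svc_set_num_threads(serv, NULL, 0);	-> stops all 8
 */
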
/**
 * svc_rqst_replace_page - Replace one page in rq_pages[]
 * @rqstp: svc_rqst with pages to replace
 * @page: replacement page
 *
 * When replacing a page in rq_pages, batch the release of the
 * replaced pages to avoid hammering the page allocator.
 */
void svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
{
	if (*rqstp->rq_next_page) {
		if (!pagevec_space(&rqstp->rq_pvec))
			__pagevec_release(&rqstp->rq_pvec);
		pagevec_add(&rqstp->rq_pvec, *rqstp->rq_next_page);
	}

	get_page(page);
	*(rqstp->rq_next_page++) = page;
}
EXPORT_SYMBOL_GPL(svc_rqst_replace_page);

/*
 * Called from a server thread as it's exiting. Caller must hold the "service
 * mutex" for the service.
 */
void
svc_rqst_free(struct svc_rqst *rqstp)
{
	svc_release_buffer(rqstp);
	if (rqstp->rq_scratch_page)
		put_page(rqstp->rq_scratch_page);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);
	kfree_rcu(rqstp, rq_rcu_head);
}
EXPORT_SYMBOL_GPL(svc_rqst_free);

void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
		list_del_rcu(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	spin_lock_bh(&serv->sv_lock);
	serv->sv_nrthreads -= 1;
	spin_unlock_bh(&serv->sv_lock);
	svc_sock_update_bufs(serv);

	svc_rqst_free(rqstp);

	svc_put(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);

/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, protocol, port);

	return error;
}

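/*
 * For reference, the hand-rolled protocol-to-netid mapping used here and
 * in the IPv6 variant below (the RPCBIND_NETID_* string constants):
 *
 *	IPPROTO_UDP + AF_INET   -> "udp"
 *	IPPROTO_TCP + AF_INET   -> "tcp"
 *	IPPROTO_UDP + AF_INET6  -> "udp6"
 *	IPPROTO_TCP + AF_INET6  -> "tcp6"
 *
 * Any other protocol number is rejected with -ENOPROTOOPT.
 */
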
#if IS_ENABLED(CONFIG_IPV6)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* IS_ENABLED(CONFIG_IPV6) */

/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(struct net *net, const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(net, program, version,
						protocol, port);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		error = __svc_rpcb_register6(net, program, version,
						protocol, port);
#endif
	}

	trace_svc_register(progname, version, protocol, port, family, error);
	return error;
}

int svc_rpcbind_set_version(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	return __svc_register(net, progp->pg_name, progp->pg_prog,
				version, family, proto, port);
}
EXPORT_SYMBOL_GPL(svc_rpcbind_set_version);

int svc_generic_rpcbind_set(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	const struct svc_version *vers = progp->pg_vers[version];
	int error;

	if (vers == NULL)
		return 0;

	if (vers->vs_hidden) {
		trace_svc_noregister(progp->pg_name, version, proto,
				     port, family, 0);
		return 0;
	}

	/*
	 * Don't register a UDP port if we need congestion
	 * control.
	 */
	if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP)
		return 0;

	error = svc_rpcbind_set_version(net, progp, version,
					family, proto, port);

	return (vers->vs_rpcb_optnl) ? 0 : error;
}
EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set);

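/*
 * The vs_rpcb_optnl flag above makes rpcbind registration optional for a
 * version: a program such as NFSv4 is expected to set it so that an
 * unreachable rpcbind does not prevent the server from starting, while
 * v2/v3 registration failures still propagate to the caller.
 */
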
/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @net: net namespace for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, struct net *net,
		 const int family, const unsigned short proto,
		 const unsigned short port)
{
	struct svc_program	*progp;
	unsigned int		i;
	int			error = 0;

	WARN_ON_ONCE(proto == 0 && port == 0);
	if (proto == 0 && port == 0)
		return -EINVAL;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {

			error = progp->pg_rpcbind_set(net, progp, i,
					family, proto, port);
			if (error < 0) {
				printk(KERN_WARNING "svc: failed to register "
					"%sv%u RPC service (errno %d).\n",
					progp->pg_name, i, -error);
				break;
			}
		}
	}

	return error;
}

/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version].  If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway.  So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(struct net *net, const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(net, program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, 0, 0);

	trace_svc_unregister(progname, version, error);
}

/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;
			__svc_unregister(net, progp->pg_prog, i, progp->pg_name);
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * dprintk the given error with the address of the client that caused it.
 */
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char	buf[RPC_MAX_ADDRBUFLEN];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);

	va_end(args);
}
#else
static __printf(2, 3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif

__be32
svc_generic_init_request(struct svc_rqst *rqstp,
		const struct svc_program *progp,
		struct svc_process_info *ret)
{
	const struct svc_version *versp = NULL;	/* compiler food */
	const struct svc_procedure *procp = NULL;

	if (rqstp->rq_vers >= progp->pg_nvers)
		goto err_bad_vers;
	versp = progp->pg_vers[rqstp->rq_vers];
	if (!versp)
		goto err_bad_vers;

	/*
	 * Some protocol versions (namely NFSv4) require some form of
	 * congestion control.  (See RFC 7530 section 3.1 paragraph 2)
	 * In other words, UDP is not allowed. We mark those when setting
	 * up the svc_xprt, and verify that here.
	 *
	 * The spec is not very clear about what error should be returned
	 * when someone tries to access a server that is listening on UDP
	 * for lower versions. RPC_PROG_MISMATCH seems to be the closest
	 * fit.
	 */
	if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
	    !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
		goto err_bad_vers;

	if (rqstp->rq_proc >= versp->vs_nproc)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
	if (!procp)
		goto err_bad_proc;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* Bump per-procedure stats counter */
	versp->vs_count[rqstp->rq_proc]++;

	ret->dispatch = versp->vs_dispatch;
	return rpc_success;
err_bad_vers:
	ret->mismatch.lovers = progp->pg_lovers;
	ret->mismatch.hivers = progp->pg_hivers;
	return rpc_prog_mismatch;
err_bad_proc:
	return rpc_proc_unavail;
}
EXPORT_SYMBOL_GPL(svc_generic_init_request);

/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
	struct svc_program	*progp;
	const struct svc_procedure *procp = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_process_info process;
	__be32			*statp;
	u32			prog, vers;
	__be32			rpc_stat;
	int			auth_res, rc;
	__be32			*reply_statp;

	rpc_stat = rpc_success;

	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* Will be turned off by GSS integrity and privacy services */
	set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
	/* Will be turned off only when NFSv4 Sessions are used */
	set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
	clear_bit(RQ_DROPME, &rqstp->rq_flags);

	svc_putu32(resv, rqstp->rq_xid);

	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = svc_getnl(argv);	/* procedure number */

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp)
		auth_res = progp->pg_authenticate(rqstp);
	if (auth_res != SVC_OK)
		trace_svc_authenticate(rqstp, auth_res);
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_CLOSE:
		goto close;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	rpc_stat = progp->pg_init_request(rqstp, progp, &process);
	switch (rpc_stat) {
	case rpc_success:
		break;
	case rpc_prog_unavail:
		goto err_bad_prog;
	case rpc_prog_mismatch:
		goto err_bad_vers;
	case rpc_proc_unavail:
		goto err_bad_proc;
	}

	procp = rqstp->rq_procinfo;
	/* Should this check go into the dispatcher? */
	if (!procp || !procp->pc_func)
		goto err_bad_proc;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;
	trace_svc_process(rqstp, progp->pg_name);

	/* Build the reply header. */
	statp = resv->iov_base + resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);

	/* Call the function that processes the request. */
	rc = process.dispatch(rqstp, statp);
	if (procp->pc_release)
		procp->pc_release(rqstp);
	if (!rc)
		goto dropit;
	if (rqstp->rq_auth_stat != rpc_auth_ok)
		goto err_bad_auth;

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void *)statp) - resv->iov_base + 4;

	if (procp->pc_encode == NULL)
		goto dropit;

 sendit:
	if (svc_authorise(rqstp))
		goto close_xprt;
	return 1;		/* Caller can now send it */

 dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	return 0;

 close:
	svc_authorise(rqstp);
close_xprt:
	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
		svc_close_xprt(rqstp->rq_xprt);
	dprintk("svc: svc_process close\n");
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %zd, dropping request\n",
			argv->iov_len);
	goto close_xprt;

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n",
		be32_to_cpu(rqstp->rq_auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putu32(resv, rqstp->rq_auth_stat);	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		       rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, process.mismatch.lovers);
	svc_putnl(resv, process.mismatch.hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	svc_printk(rqstp, "failed to decode args\n");

	rpc_stat = rpc_garbage_args;
 err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}

/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct kvec		*argv = &rqstp->rq_arg.head[0];
	struct kvec		*resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	u32			dir;

#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
	if (!fail_sunrpc.ignore_server_disconnect &&
	    should_fail(&fail_sunrpc.attr, 1))
		svc_xprt_deferred_close(rqstp->rq_xprt);
#endif

	/*
	 * Setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_next_page = &rqstp->rq_respages[1];
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;

	dir = svc_getnl(argv);
	if (dir != 0) {
		/* direction != CALL */
		svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
		serv->sv_stats->rpcbadfmt++;
		goto out_drop;
	}

	/* Returns 1 for send, 0 for drop */
	if (likely(svc_process_common(rqstp, argv, resv)))
		return svc_send(rqstp);

out_drop:
	svc_drop(rqstp);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_process);

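/*
 * For reference, an RPC call begins with these 32-bit XDR words
 * (RFC 5531):
 *
 *	xid | msg_type (0 = CALL) | rpcvers (2) | prog | vers | proc
 *
 * svc_process() checks the msg_type word above; the rest of the header,
 * plus the credential and verifier, is consumed in svc_process_common(),
 * which is why that routine insists on at least 6 * 4 bytes.
 */
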
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Process a backchannel RPC request that arrived over an existing
 * outbound connection
 */
int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
	       struct svc_rqst *rqstp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	struct rpc_task *task;
	int proc_error;
	int error;

	dprintk("svc: %s(%p)\n", __func__, req);

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_server = serv;
	rqstp->rq_bc_net = req->rq_xprt->xprt_net;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* Adjust the argument buffer length */
	rqstp->rq_arg.len = req->rq_private_buf.len;
	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
		rqstp->rq_arg.page_len = 0;
	} else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len)
		rqstp->rq_arg.page_len = rqstp->rq_arg.len -
			rqstp->rq_arg.head[0].iov_len;
	else
		rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len;

	/* reset result send buffer "put" position */
	resv->iov_len = 0;

	/*
	 * Skip the next two words because they've already been
	 * processed in the transport
	 */
	svc_getu32(argv);	/* XID */
	svc_getnl(argv);	/* CALLDIR */

	/* Parse and execute the bc call */
	proc_error = svc_process_common(rqstp, argv, resv);

	atomic_dec(&req->rq_xprt->bc_slot_count);
	if (!proc_error) {
		/* Processing error: drop the request */
		xprt_free_bc_request(req);
		error = -EINVAL;
		goto out;
	}
	/* Finally, send the reply synchronously */
	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
	task = rpc_run_bc_task(req);
	if (IS_ERR(task)) {
		error = PTR_ERR(task);
		goto out;
	}

	WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
	error = task->tk_status;
	rpc_put_task(task);

out:
	dprintk("svc: %s(), error=%d\n", __func__, error);
	return error;
}
EXPORT_SYMBOL_GPL(bc_svc_process);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);

/**
 * svc_proc_name - Return RPC procedure name in string form
 * @rqstp: svc_rqst to operate on
 *
 * Return value:
 *   Pointer to a NUL-terminated string
 */
const char *svc_proc_name(const struct svc_rqst *rqstp)
{
	if (rqstp && rqstp->rq_procinfo)
		return rqstp->rq_procinfo->pc_name;
	return "unknown";
}

/**
 * svc_encode_result_payload - mark a range of bytes as a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success, or a negative errno if a permanent
 * error occurred.
 */
int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			      unsigned int length)
{
	return rqstp->rq_xprt->xpt_ops->xpo_result_payload(rqstp, offset,
							   length);
}
EXPORT_SYMBOL_GPL(svc_encode_result_payload);

/**
 * svc_fill_write_vector - Construct data argument for VFS write call
 * @rqstp: svc_rqst to operate on
 * @payload: xdr_buf containing only the write data payload
 *
 * Fills in rqstp::rq_vec, and returns the number of elements.
 */
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
				   struct xdr_buf *payload)
{
	struct page **pages = payload->pages;
	struct kvec *first = payload->head;
	struct kvec *vec = rqstp->rq_vec;
	size_t total = payload->len;
	unsigned int i;

	/* Some types of transport can present the write payload
	 * entirely in rq_arg.pages. In this case, @first is empty.
	 */
	i = 0;
	if (first->iov_len) {
		vec[i].iov_base = first->iov_base;
		vec[i].iov_len = min_t(size_t, total, first->iov_len);
		total -= vec[i].iov_len;
		++i;
	}

	while (total) {
		vec[i].iov_base = page_address(*pages);
		vec[i].iov_len = min_t(size_t, total, PAGE_SIZE);
		total -= vec[i].iov_len;
		++i;
		++pages;
	}

	WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
	return i;
}
EXPORT_SYMBOL_GPL(svc_fill_write_vector);

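/*
 * Worked example: a 5000-byte write payload arriving with 200 bytes in
 * payload->head and the remainder in payload->pages yields, assuming
 * 4096-byte pages:
 *
 *	rq_vec[0] = { head->iov_base,           200 }
 *	rq_vec[1] = { page_address(pages[0]),  4096 }
 *	rq_vec[2] = { page_address(pages[1]),   704 }
 *
 * and a return value of 3.
 */
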
/**
 * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call
 * @rqstp: svc_rqst to operate on
 * @first: buffer containing first section of pathname
 * @p: buffer containing remaining section of pathname
 * @total: total length of the pathname argument
 *
 * The VFS symlink API demands a NUL-terminated pathname in mapped memory.
 * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free
 * the returned string.
 */
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first,
				void *p, size_t total)
{
	size_t len, remaining;
	char *result, *dst;

	result = kmalloc(total + 1, GFP_KERNEL);
	if (!result)
		return ERR_PTR(-ESERVERFAULT);

	dst = result;
	remaining = total;

	len = min_t(size_t, total, first->iov_len);
	if (len) {
		memcpy(dst, first->iov_base, len);
		dst += len;
		remaining -= len;
	}

	if (remaining) {
		len = min_t(size_t, remaining, PAGE_SIZE);
		memcpy(dst, p, len);
		dst += len;
	}

	result[total] = '\0';

	/* Sanity check: Linux doesn't allow the pathname argument to
	 * contain a NUL byte.
	 */
	if (strlen(result) != total) {
		kfree(result);
		return ERR_PTR(-EINVAL);
	}
	return result;
}
EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname);