cregit-Linux: how code gets into the kernel

Release 4.11: net/sunrpc/svc.c (directory: net/sunrpc)
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple threads pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>


#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv, struct net *net);


#define svc_serv_is_pooled(serv)    ((serv)->sv_ops->svo_function)


#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */

struct svc_pool_map svc_pool_map = {
	.mode = SVC_POOL_DEFAULT
};

EXPORT_SYMBOL_GPL(svc_pool_map);

static DEFINE_MUTEX(svc_pool_map_mutex);	/* protects svc_pool_map.count only */


static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
        int *ip = (int *)kp->arg;
        struct svc_pool_map *m = &svc_pool_map;
        int err;

        mutex_lock(&svc_pool_map_mutex);

        err = -EBUSY;
        if (m->count)
                goto out;

        err = 0;
        if (!strncmp(val, "auto", 4))
                *ip = SVC_POOL_AUTO;
        else if (!strncmp(val, "global", 6))
                *ip = SVC_POOL_GLOBAL;
        else if (!strncmp(val, "percpu", 6))
                *ip = SVC_POOL_PERCPU;
        else if (!strncmp(val, "pernode", 7))
                *ip = SVC_POOL_PERNODE;
        else
                err = -EINVAL;

out:
        mutex_unlock(&svc_pool_map_mutex);
        return err;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Greg Banks                 151     100.00%  1        100.00%
Total                      151     100.00%  1        100.00%


static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
        int *ip = (int *)kp->arg;

        switch (*ip) {
        case SVC_POOL_AUTO:
                return strlcpy(buf, "auto", 20);
        case SVC_POOL_GLOBAL:
                return strlcpy(buf, "global", 20);
        case SVC_POOL_PERCPU:
                return strlcpy(buf, "percpu", 20);
        case SVC_POOL_PERNODE:
                return strlcpy(buf, "pernode", 20);
        default:
                return sprintf(buf, "%d", *ip);
        }
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Greg Banks                 98      100.00%  1        100.00%
Total                      98      100.00%  1        100.00%

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
                  &svc_pool_map.mode, 0644);

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
        unsigned int node;

        if (nr_online_nodes > 1) {
                /*
                 * Actually have multiple NUMA nodes,
                 * so split pools on NUMA node boundaries
                 */
                return SVC_POOL_PERNODE;
        }

        node = first_online_node;
        if (nr_cpus_node(node) > 2) {
                /*
                 * Non-trivial SMP, or CONFIG_NUMA on
                 * non-NUMA hardware, e.g. with a generic
                 * x86_64 kernel on Xeons.  In this case we
                 * want to divide the pools on cpu boundaries.
                 */
                return SVC_POOL_PERCPU;
        }

        /* default: one global pool */
        return SVC_POOL_GLOBAL;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Greg Banks                 45      95.74%   1        33.33%
H Hartley Sweeten          1       2.13%    1        33.33%
Christoph Lameter          1       2.13%    1        33.33%
Total                      47      100.00%  3        100.00%

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
        m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->to_pool)
                goto fail;
        m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->pool_to)
                goto fail_free;

        return 0;

fail_free:
        kfree(m->to_pool);
        m->to_pool = NULL;
fail:
        return -ENOMEM;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Greg Banks                 87      93.55%   1        50.00%
J. Bruce Fields            6       6.45%    1        50.00%
Total                      93      100.00%  2        100.00%

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_cpu_ids;
        unsigned int pidx = 0;
        unsigned int cpu;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_online_cpu(cpu) {
                BUG_ON(pidx >= maxpools);
                m->to_pool[cpu] = pidx;
                m->pool_to[pidx] = cpu;
                pidx++;
        }
        /* cpus brought online later all get mapped to pool0, sorry */

        return pidx;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Greg Banks                 82      97.62%   1        33.33%
Christoph Lameter          1       1.19%    1        33.33%
Dan Carpenter              1       1.19%    1        33.33%
Total                      84      100.00%  3        100.00%

/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_node_ids;
        unsigned int pidx = 0;
        unsigned int node;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_node_with_cpus(node) {
                /* some architectures (e.g. SN2) have cpuless nodes */
                BUG_ON(pidx > maxpools);
                m->to_pool[node] = pidx;
                m->pool_to[pidx] = node;
                pidx++;
        }
        /* nodes brought online later all get mapped to pool0, sorry */

        return pidx;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Greg Banks                 84      98.82%   1        50.00%
Christoph Lameter          1       1.18%    1        50.00%
Total                      85      100.00%  2        100.00%

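The two arrays built above are inverse maps: to_pool[] takes a cpu or node id to a pool index, and pool_to[] takes a pool index back to the cpu or node it serves. To make that concrete, here is a minimal self-contained userspace model (not kernel code; the four-cpu SVC_POOL_PERCPU topology is hypothetical):

#include <stdio.h>

/* Hypothetical SVC_POOL_PERCPU topology: four online cpus, ids 0..3. */
int main(void)
{
        unsigned int to_pool[4];        /* cpu id -> pool index */
        unsigned int pool_to[4];        /* pool index -> cpu id */
        unsigned int pidx = 0;
        unsigned int cpu;

        for (cpu = 0; cpu < 4; cpu++) {
                to_pool[cpu] = pidx;
                pool_to[pidx] = cpu;
                pidx++;
        }

        /* Round trip: the pool picked for cpu 2 serves exactly cpu 2. */
        printf("cpu 2 -> pool %u -> cpu %u\n",
               to_pool[2], pool_to[to_pool[2]]);
        return 0;
}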
/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
unsigned int
svc_pool_map_get(void)
{
        struct svc_pool_map *m = &svc_pool_map;
        int npools = -1;

        mutex_lock(&svc_pool_map_mutex);

        if (m->count++) {
                mutex_unlock(&svc_pool_map_mutex);
                return m->npools;
        }

        if (m->mode == SVC_POOL_AUTO)
                m->mode = svc_pool_map_choose_mode();

        switch (m->mode) {
        case SVC_POOL_PERCPU:
                npools = svc_pool_map_init_percpu(m);
                break;
        case SVC_POOL_PERNODE:
                npools = svc_pool_map_init_pernode(m);
                break;
        }

        if (npools < 0) {
                /* default, or memory allocation failure */
                npools = 1;
                m->mode = SVC_POOL_GLOBAL;
        }
        m->npools = npools;

        mutex_unlock(&svc_pool_map_mutex);
        return m->npools;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Greg Banks                 129     100.00%  2        100.00%
Total                      129     100.00%  2        100.00%

EXPORT_SYMBOL_GPL(svc_pool_map_get);

/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
void
svc_pool_map_put(void)
{
        struct svc_pool_map *m = &svc_pool_map;

        mutex_lock(&svc_pool_map_mutex);

        if (!--m->count) {
                kfree(m->to_pool);
                m->to_pool = NULL;
                kfree(m->pool_to);
                m->pool_to = NULL;
                m->npools = 0;
        }

        mutex_unlock(&svc_pool_map_mutex);
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Greg Banks                 57      82.61%   1        50.00%
J. Bruce Fields            12      17.39%   1        50.00%
Total                      69      100.00%  2        100.00%

EXPORT_SYMBOL_GPL(svc_pool_map_put);
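
The get/put pair is meant to bracket a pooled service's lifetime, which is exactly how svc_create_pooled() and svc_destroy() later in this file use it. A compressed sketch of that lifecycle (a sketch only; my_prog and my_ops are hypothetical, and error handling is abridged):

/* Sketch: the get/put pairing as used by svc_create_pooled()/svc_destroy(). */
static void my_pooled_lifecycle(struct svc_program *my_prog,
                                struct svc_serv_ops *my_ops)
{
        unsigned int npools = svc_pool_map_get();  /* first user builds the map */
        struct svc_serv *serv;

        serv = __svc_create(my_prog, 4096, npools, my_ops);
        if (!serv) {
                svc_pool_map_put();     /* creation failed: drop our reference */
                return;
        }
        /* ... service runs; assuming my_ops sets svo_function, the serv is
         * "pooled", and svc_destroy() will call svc_pool_map_put() ... */
        svc_destroy(serv);
}
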
static int svc_pool_map_get_node(unsigned int pidx)
{
        const struct svc_pool_map *m = &svc_pool_map;

        if (m->count) {
                if (m->mode == SVC_POOL_PERCPU)
                        return cpu_to_node(m->pool_to[pidx]);
                if (m->mode == SVC_POOL_PERNODE)
                        return m->pool_to[pidx];
        }
        return NUMA_NO_NODE;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Eric Dumazet               65      100.00%  1        100.00%
Total                      65      100.00%  1        100.00%

/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
        struct svc_pool_map *m = &svc_pool_map;
        unsigned int node = m->pool_to[pidx];

        /*
         * The caller checks for sv_nrpools > 1, which
         * implies that we've been initialized.
         */
        WARN_ON_ONCE(m->count == 0);
        if (m->count == 0)
                return;

        switch (m->mode) {
        case SVC_POOL_PERCPU:
        {
                set_cpus_allowed_ptr(task, cpumask_of(node));
                break;
        }
        case SVC_POOL_PERNODE:
        {
                set_cpus_allowed_ptr(task, cpumask_of_node(node));
                break;
        }
        }
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Greg Banks                 49      52.13%   2        22.22%
Jeff Layton                20      21.28%   1        11.11%
Weston Andros Adamson      10      10.64%   1        11.11%
Mike Travis                9       9.57%    2        22.22%
Rusty Russell              5       5.32%    2        22.22%
Ingo Molnar                1       1.06%    1        11.11%
Total                      94      100.00%  9        100.00%

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
        struct svc_pool_map *m = &svc_pool_map;
        unsigned int pidx = 0;

        /*
         * An uninitialised map happens in a pure client when
         * lockd is brought up, so silently treat it the
         * same as SVC_POOL_GLOBAL.
         */
        if (svc_serv_is_pooled(serv)) {
                switch (m->mode) {
                case SVC_POOL_PERCPU:
                        pidx = m->to_pool[cpu];
                        break;
                case SVC_POOL_PERNODE:
                        pidx = m->to_pool[cpu_to_node(cpu)];
                        break;
                }
        }
        return &serv->sv_pools[pidx % serv->sv_nrpools];
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Greg Banks                 89      100.00%  2        100.00%
Total                      89      100.00%  2        100.00%

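For context, the main consumer of svc_pool_for_cpu() is the transport-enqueue path in svc_xprt.c. A hedged sketch of that call pattern (example_enqueue is illustrative, not the actual svc_xprt_do_enqueue() code):

/* Sketch of the enqueue side: map the current cpu to a pool, then
 * queue the transport on that pool and wake one of its threads. */
static void example_enqueue(struct svc_serv *serv, struct svc_xprt *xprt)
{
        struct svc_pool *pool;
        int cpu;

        cpu = get_cpu();
        pool = svc_pool_for_cpu(serv, cpu);
        /* ... add xprt to pool->sp_sockets and wake an rqstp ... */
        put_cpu();
}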

int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
        int err;

        err = rpcb_create_local(net);
        if (err)
                return err;

        /* Remove any stale portmap registrations */
        svc_unregister(serv, net);
        return 0;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Stanislav Kinsbursky       43      100.00%  4        100.00%
Total                      43      100.00%  4        100.00%

EXPORT_SYMBOL_GPL(svc_rpcb_setup);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
        svc_unregister(serv, net);
        rpcb_put_local(net);
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Stanislav Kinsbursky       27      100.00%  4        100.00%
Total                      27      100.00%  4        100.00%

EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);
static int svc_uses_rpcbind(struct svc_serv *serv)
{
        struct svc_program *progp;
        unsigned int i;

        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
                for (i = 0; i < progp->pg_nvers; i++) {
                        if (progp->pg_vers[i] == NULL)
                                continue;
                        if (!progp->pg_vers[i]->vs_hidden)
                                return 1;
                }
        }

        return 0;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Stanislav Kinsbursky       84      98.82%   1        50.00%
Jeff Layton                1       1.18%    1        50.00%
Total                      85      100.00%  2        100.00%


int svc_bind(struct svc_serv *serv, struct net *net)
{
        if (!svc_uses_rpcbind(serv))
                return 0;
        return svc_rpcb_setup(serv, net);
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Stanislav Kinsbursky       34      100.00%  1        100.00%
Total                      34      100.00%  1        100.00%

EXPORT_SYMBOL_GPL(svc_bind);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void
__svc_init_bc(struct svc_serv *serv)
{
        INIT_LIST_HEAD(&serv->sv_cb_list);
        spin_lock_init(&serv->sv_cb_lock);
        init_waitqueue_head(&serv->sv_cb_waitq);
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Trond Myklebust            35      100.00%  1        100.00%
Total                      35      100.00%  1        100.00%

#else
static void
__svc_init_bc(struct svc_serv *serv)
{
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Trond Myklebust            10      100.00%  1        100.00%
Total                      10      100.00%  1        100.00%

#endif

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
             struct svc_serv_ops *ops)
{
        struct svc_serv *serv;
        unsigned int vers;
        unsigned int xdrsize;
        unsigned int i;

        if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
                return NULL;
        serv->sv_name      = prog->pg_name;
        serv->sv_program   = prog;
        serv->sv_nrthreads = 1;
        serv->sv_stats     = prog->pg_stats;
        if (bufsize > RPCSVC_MAXPAYLOAD)
                bufsize = RPCSVC_MAXPAYLOAD;
        serv->sv_max_payload = bufsize ? bufsize : 4096;
        serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
        serv->sv_ops = ops;
        xdrsize = 0;
        while (prog) {
                prog->pg_lovers = prog->pg_nvers - 1;
                for (vers = 0; vers < prog->pg_nvers; vers++)
                        if (prog->pg_vers[vers]) {
                                prog->pg_hivers = vers;
                                if (prog->pg_lovers > vers)
                                        prog->pg_lovers = vers;
                                if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
                                        xdrsize = prog->pg_vers[vers]->vs_xdrsize;
                        }
                prog = prog->pg_next;
        }
        serv->sv_xdrsize   = xdrsize;
        INIT_LIST_HEAD(&serv->sv_tempsocks);
        INIT_LIST_HEAD(&serv->sv_permsocks);
        init_timer(&serv->sv_temptimer);
        spin_lock_init(&serv->sv_lock);

        __svc_init_bc(serv);

        serv->sv_nrpools = npools;
        serv->sv_pools =
                kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
                        GFP_KERNEL);
        if (!serv->sv_pools) {
                kfree(serv);
                return NULL;
        }

        for (i = 0; i < serv->sv_nrpools; i++) {
                struct svc_pool *pool = &serv->sv_pools[i];

                dprintk("svc: initialising pool %u for %s\n",
                        i, serv->sv_name);

                pool->sp_id = i;
                INIT_LIST_HEAD(&pool->sp_sockets);
                INIT_LIST_HEAD(&pool->sp_all_threads);
                spin_lock_init(&pool->sp_lock);
        }

        return serv;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Neil Brown                 145     37.47%   5        26.32%
Greg Banks                 117     30.23%   3        15.79%
Linus Torvalds (pre-git)   83      21.45%   2        10.53%
Andreas Gruenbacher        24      6.20%    1        5.26%
Trond Myklebust            5       1.29%    1        5.26%
Robert P. J. Day           4       1.03%    1        5.26%
Jeff Layton                4       1.03%    1        5.26%
Stanislav Kinsbursky       2       0.52%    2        10.53%
Chuck Lever                2       0.52%    2        10.53%
Panagiotis Issaris         1       0.26%    1        5.26%
Total                      387     100.00%  19       100.00%


struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
           struct svc_serv_ops *ops)
{
        return __svc_create(prog, bufsize, /*npools*/1, ops);
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Greg Banks                 31      91.18%   1        50.00%
Jeff Layton                3       8.82%    1        50.00%
Total                      34      100.00%  2        100.00%

EXPORT_SYMBOL_GPL(svc_create);
struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
                  struct svc_serv_ops *ops)
{
        struct svc_serv *serv;
        unsigned int npools = svc_pool_map_get();

        serv = __svc_create(prog, bufsize, npools, ops);
        if (!serv)
                goto out_err;
        return serv;
out_err:
        svc_pool_map_put();
        return NULL;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Greg Banks                 50      76.92%   3        60.00%
Jeff Layton                15      23.08%   2        40.00%
Total                      65      100.00%  5        100.00%

EXPORT_SYMBOL_GPL(svc_create_pooled);
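
Putting the creation API together: a hedged sketch of how a service along the lines of nfsd or lockd might bring itself up. All my_* names are illustrative, and only the svo_function and svo_module fields (the ones this file itself dereferences) are shown; setting svo_function is also what makes svc_serv_is_pooled() true.

static int my_service_thread(void *data);       /* loops in svc_recv()/svc_process() */

static struct svc_serv_ops my_sv_ops = {
        .svo_function = my_service_thread,
        .svo_module   = THIS_MODULE,
};

static int my_service_start(struct svc_program *prog, struct net *net,
                            int nthreads)
{
        struct svc_serv *serv;
        int err;

        serv = svc_create_pooled(prog, 1024 * 1024, &my_sv_ops);
        if (!serv)
                return -ENOMEM;

        err = svc_bind(serv, net);      /* rpcbind setup, if any version is visible */
        if (!err)
                err = svc_set_num_threads(serv, NULL, nthreads);
        if (err)
                svc_destroy(serv);
        return err;
}
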
void svc_shutdown_net(struct svc_serv *serv, struct net *net)
{
        svc_close_net(serv, net);

        if (serv->sv_ops->svo_shutdown)
                serv->sv_ops->svo_shutdown(serv, net);
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Stanislav Kinsbursky       35      85.37%   1        50.00%
Jeff Layton                6       14.63%   1        50.00%
Total                      41      100.00%  2        100.00%

EXPORT_SYMBOL_GPL(svc_shutdown_net);

/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
        dprintk("svc: svc_destroy(%s, %d)\n",
                serv->sv_program->pg_name,
                serv->sv_nrthreads);

        if (serv->sv_nrthreads) {
                if (--(serv->sv_nrthreads) != 0) {
                        svc_sock_update_bufs(serv);
                        return;
                }
        } else
                printk("svc_destroy: no threads for serv=%p!\n", serv);

        del_timer_sync(&serv->sv_temptimer);

        /*
         * The last user is gone and thus all sockets have to be destroyed to
         * the point. Check this.
         */
        BUG_ON(!list_empty(&serv->sv_permsocks));
        BUG_ON(!list_empty(&serv->sv_tempsocks));

        cache_clean_deferred(serv);

        if (svc_serv_is_pooled(serv))
                svc_pool_map_put();

        kfree(serv->sv_pools);
        kfree(serv);
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Linus Torvalds (pre-git)   57      47.50%   2        20.00%
Greg Banks                 25      20.83%   3        30.00%
Stanislav Kinsbursky       23      19.17%   1        10.00%
Neil Brown                 14      11.67%   3        30.00%
Chuck Lever                1       0.83%    1        10.00%
Total                      120     100.00%  10       100.00%

EXPORT_SYMBOL_GPL(svc_destroy);

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
        unsigned int pages, arghi;

        /* bc_xprt uses fore channel allocated buffers */
        if (svc_is_backchannel(rqstp))
                return 1;

        pages = size / PAGE_SIZE + 1;   /* extra page as we hold both request and reply.
                                         * We assume one is at most one page
                                         */
        arghi = 0;
        WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
        if (pages > RPCSVC_MAXPAGES)
                pages = RPCSVC_MAXPAGES;
        while (pages) {
                struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
                if (!p)
                        break;
                rqstp->rq_pages[arghi++] = p;
                pages--;
        }
        return pages == 0;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Neil Brown                 48      44.04%   4        40.00%
Linus Torvalds (pre-git)   25      22.94%   1        10.00%
Weston Andros Adamson      11      10.09%   1        10.00%
Andy Adamson               11      10.09%   1        10.00%
Eric Dumazet               8       7.34%    1        10.00%
Chuck Lever                4       3.67%    1        10.00%
Kris Katterjohn            2       1.83%    1        10.00%
Total                      109     100.00%  10       100.00%

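A worked sizing example for svc_init_buffer() above, with hypothetical numbers: a 1 MiB bufsize given to __svc_create() earlier in this file, and a PAGE_SIZE of 4096:

/* Hypothetical: bufsize = 1 MiB, PAGE_SIZE = 4096. */
unsigned int max_payload = 1048576;             /* sv_max_payload */
unsigned int max_mesg    = 1048576 + 4096;      /* sv_max_mesg; roundup() is a no-op here */
unsigned int pages       = 1052672 / 4096 + 1;  /* = 258; the +1 page holds the reply */
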
/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
                if (rqstp->rq_pages[i])
                        put_page(rqstp->rq_pages[i]);
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Neil Brown                 42      80.77%   3        60.00%
Linus Torvalds (pre-git)   9       17.31%   1        20.00%
Chuck Lever                1       1.92%    1        20.00%
Total                      52      100.00%  5        100.00%


struct svc_rqst *
svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
{
        struct svc_rqst *rqstp;

        rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
        if (!rqstp)
                return rqstp;

        __set_bit(RQ_BUSY, &rqstp->rq_flags);
        spin_lock_init(&rqstp->rq_lock);
        rqstp->rq_server = serv;
        rqstp->rq_pool = pool;

        rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
        if (!rqstp->rq_argp)
                goto out_enomem;

        rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
        if (!rqstp->rq_resp)
                goto out_enomem;

        if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
                goto out_enomem;

        return rqstp;
out_enomem:
        svc_rqst_free(rqstp);
        return NULL;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Jeff Layton                74      46.84%   3        37.50%
Linus Torvalds (pre-git)   61      38.61%   2        25.00%
Eric Dumazet               14      8.86%    1        12.50%
Greg Banks                 8       5.06%    1        12.50%
Neil Brown                 1       0.63%    1        12.50%
Total                      158     100.00%  8        100.00%

EXPORT_SYMBOL_GPL(svc_rqst_alloc);
struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
        struct svc_rqst *rqstp;

        rqstp = svc_rqst_alloc(serv, pool, node);
        if (!rqstp)
                return ERR_PTR(-ENOMEM);

        serv->sv_nrthreads++;
        spin_lock_bh(&pool->sp_lock);
        pool->sp_nrthreads++;
        list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
        spin_unlock_bh(&pool->sp_lock);
        return rqstp;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Jeff Layton                88      97.78%   2        66.67%
Greg Banks                 2       2.22%    1        33.33%
Total                      90      100.00%  3        100.00%

EXPORT_SYMBOL_GPL(svc_prepare_thread);

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
        if (pool != NULL)
                return pool;

        return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Greg Banks                 45      90.00%   1        50.00%
Linus Torvalds (pre-git)   5       10.00%   1        50.00%
Total                      50      100.00%  2        100.00%

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
        unsigned int i;
        struct task_struct *task = NULL;

        if (pool != NULL) {
                spin_lock_bh(&pool->sp_lock);
        } else {
                /* choose a pool in round-robin fashion */
                for (i = 0; i < serv->sv_nrpools; i++) {
                        pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
                        spin_lock_bh(&pool->sp_lock);
                        if (!list_empty(&pool->sp_all_threads))
                                goto found_pool;
                        spin_unlock_bh(&pool->sp_lock);
                }
                return NULL;
        }

found_pool:
        if (!list_empty(&pool->sp_all_threads)) {
                struct svc_rqst *rqstp;

                /*
                 * Remove from the pool->sp_all_threads list
                 * so we don't try to kill it again.
                 */
                rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
                set_bit(RQ_VICTIM, &rqstp->rq_flags);
                list_del_rcu(&rqstp->rq_all);
                task = rqstp->rq_task;
        }
        spin_unlock_bh(&pool->sp_lock);

        return task;
}

Contributors

Person                     Tokens  Prop     Commits  Commit Prop
Greg Banks                 176     90.26%   2        50.00%
Jeff Layton                11      5.64%    1        25.00%
Linus Torvalds (pre-git)   8       4.10%    1        25.00%
Total                      195     100.00%  4        100.00%

/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Caller must ensure mutual exclusion between this and
 * server startup or shutdown.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
        struct svc_rqst *rqstp;
        struct task_struct *task;
        struct svc_pool *chosen_pool;
        int error = 0;
        unsigned int state = serv->sv_nrthreads - 1;
        int node;

        if (pool == NULL) {
                /* The -1 assumes caller has done a svc_get() */
                nrservs -= (serv->sv_nrthreads - 1);
        } else {
                spin_lock_bh(&pool->sp_lock);
                nrservs -= pool->sp_nrthreads;
                spin_unlock_bh(&pool->sp_lock);
        }

        /* create new threads */
        while (nrservs > 0) {
                nrservs--;
                chosen_pool = choose_pool(serv, pool, &state);

                node = svc_pool_map_get_node(chosen_pool->sp_id);
                rqstp = svc_prepare_thread(serv, chosen_pool, node);
                if (IS_ERR(rqstp)) {
                        error = PTR_ERR(rqstp);
                        break;
                }

                __module_get(serv->sv_ops->svo_module);
                task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
                                              node, "%s", serv->sv_name);
                if (IS_ERR(task)) {
                        error = PTR_ERR(task);
                        module_put(serv->sv_ops->svo_module);
                        svc_exit_thread(rqstp);
                        break;
                }

                rqstp