cregit-Linux: how code gets into the kernel

Release 4.14 net/sunrpc/sched.c

/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>

#include <linux/sunrpc/clnt.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif


#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */

#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)

static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;
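
The creation of these caches and pools is not shown in this excerpt (it happens further down the file, in rpc_init_mempool()). As a hedged sketch of the pattern, using the hypothetical name example_rpc_init_mempool(): the slab cache gives cheap allocation, while a small mempool of RPC_TASK_POOLSIZE objects guarantees forward progress under memory pressure.

/* Editor's sketch only; mirrors the usual kmem_cache + mempool setup. */
static int example_rpc_init_mempool(void)
{
        rpc_task_slabp = kmem_cache_create("rpc_tasks",
                                           sizeof(struct rpc_task),
                                           0, SLAB_HWCACHE_ALIGN,
                                           NULL);
        if (!rpc_task_slabp)
                return -ENOMEM;
        rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
                                                    rpc_task_slabp);
        if (!rpc_task_mempool)
                return -ENOMEM;
        return 0;
}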

static void			rpc_async_schedule(struct work_struct *);
static void			rpc_release_task(struct rpc_task *task);
static void			__rpc_queue_timer_fn(unsigned long ptr);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */

static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */

struct workqueue_struct *rpciod_workqueue __read_mostly;

struct workqueue_struct *xprtiod_workqueue __read_mostly;

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */

static void __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        if (task->tk_timeout == 0)
                return;
        dprintk("RPC: %5u disabling timer\n", task->tk_pid);
        task->tk_timeout = 0;
        list_del(&task->u.tk_wait.timer_list);
        if (list_empty(&queue->timer_list.list))
                del_timer(&queue->timer_list.timer);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             49      66.22%    2        50.00%
Linus Torvalds (pre-git)    24      32.43%    1        25.00%
Chuck Lever                 1       1.35%     1        25.00%
Total                       74      100.00%   4        100.00%


static void rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
        queue->timer_list.expires = expires;
        mod_timer(&queue->timer_list.timer, expires);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             34      97.14%    1        50.00%
Linus Torvalds (pre-git)    1       2.86%     1        50.00%
Total                       35      100.00%   2        100.00%

/*
 * Set up a timer for the current task.
 */
static void __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        if (!task->tk_timeout)
                return;

        dprintk("RPC: %5u setting alarm for %u ms\n",
                        task->tk_pid, jiffies_to_msecs(task->tk_timeout));

        task->u.tk_wait.expires = jiffies + task->tk_timeout;
        if (list_empty(&queue->timer_list.list) ||
            time_before(task->u.tk_wait.expires, queue->timer_list.expires))
                rpc_set_queue_timer(queue, task->u.tk_wait.expires);
        list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             60      52.17%    3        33.33%
Linus Torvalds (pre-git)    51      44.35%    4        44.44%
Nicholas Mc Guire           4       3.48%     2        22.22%
Total                       115     100.00%   9        100.00%
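
__rpc_add_timer() only re-arms the single per-queue timer when the new task's deadline is earlier than the one already programmed. The complementary expiry path (the real __rpc_queue_timer_fn() lives further down this file, beyond this excerpt) must then walk timer_list, time out overdue tasks, and re-arm for the earliest survivor. A hedged sketch of that logic, with the hypothetical name example_queue_timer_fn() and assuming the pre-4.15 timer callback convention of an unsigned long cookie:

/* Editor's sketch of the expiry side of the per-queue timer. */
static void example_queue_timer_fn(unsigned long ptr)
{
        struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
        struct rpc_task *task, *n;
        unsigned long expires, now, timeo;

        spin_lock(&queue->lock);
        expires = now = jiffies;
        list_for_each_entry_safe(task, n, &queue->timer_list.list,
                        u.tk_wait.timer_list) {
                timeo = task->u.tk_wait.expires;
                if (time_after_eq(now, timeo)) {
                        /* Deadline passed: fail the task and wake it. */
                        task->tk_status = -ETIMEDOUT;
                        rpc_wake_up_task_queue_locked(queue, task);
                        continue;
                }
                /* Track the earliest remaining deadline. */
                if (expires == now || time_after(expires, timeo))
                        expires = timeo;
        }
        if (!list_empty(&queue->timer_list.list))
                rpc_set_queue_timer(queue, expires);
        spin_unlock(&queue->lock);
}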


static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
{
        struct list_head *q = &queue->tasks[queue->priority];
        struct rpc_task *task;

        if (!list_empty(q)) {
                task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
                if (task->tk_owner == queue->owner)
                        list_move_tail(&task->u.tk_wait.list, q);
        }
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             81      100.00%   1        100.00%
Total                       81      100.00%   1        100.00%


static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
        if (queue->priority != priority) {
                /* Fairness: rotate the list when changing priority */
                rpc_rotate_queue_owner(queue);
                queue->priority = priority;
        }
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             36      100.00%   2        100.00%
Total                       36      100.00%   2        100.00%


static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
        queue->owner = pid;
        queue->nr = RPC_BATCH_COUNT;
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             26      100.00%   1        100.00%
Total                       26      100.00%   1        100.00%


static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
        rpc_set_waitqueue_priority(queue, queue->maxpriority);
        rpc_set_waitqueue_owner(queue, 0);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             27      100.00%   1        100.00%
Total                       27      100.00%   1        100.00%

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
                struct rpc_task *task,
                unsigned char queue_priority)
{
        struct list_head *q;
        struct rpc_task *t;

        INIT_LIST_HEAD(&task->u.tk_wait.links);
        if (unlikely(queue_priority > queue->maxpriority))
                queue_priority = queue->maxpriority;
        if (queue_priority > queue->priority)
                rpc_set_waitqueue_priority(queue, queue_priority);
        q = &queue->tasks[queue_priority];
        list_for_each_entry(t, q, u.tk_wait.list) {
                if (t->tk_owner == task->tk_owner) {
                        list_add_tail(&task->u.tk_wait.list,
                                        &t->u.tk_wait.links);
                        return;
                }
        }
        list_add_tail(&task->u.tk_wait.list, q);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             142     100.00%   5        100.00%
Total                       142     100.00%   5        100.00%

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
                struct rpc_task *task,
                unsigned char queue_priority)
{
        WARN_ON_ONCE(RPC_IS_QUEUED(task));
        if (RPC_IS_QUEUED(task))
                return;

        if (RPC_IS_PRIORITY(queue))
                __rpc_add_wait_queue_priority(queue, task, queue_priority);
        else if (RPC_IS_SWAPPER(task))
                list_add(&task->u.tk_wait.list, &queue->tasks[0]);
        else
                list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
        task->tk_waitqueue = queue;
        queue->qlen++;
        /* barrier matches the read in rpc_wake_up_task_queue_locked() */
        smp_wmb();
        rpc_set_queued(task);

        dprintk("RPC: %5u added to queue %p \"%s\"\n",
                        task->tk_pid, queue, rpc_qname(queue));
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Linus Torvalds (pre-git)    58      42.34%    2        18.18%
Trond Myklebust             52      37.96%    5        45.45%
Neil Brown                  12      8.76%     1        9.09%
Weston Andros Adamson       9       6.57%     1        9.09%
Chuck Lever                 6       4.38%     2        18.18%
Total                       137     100.00%   11       100.00%

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
        struct rpc_task *t;

        if (!list_empty(&task->u.tk_wait.links)) {
                t = list_entry(task->u.tk_wait.links.next,
                                struct rpc_task,
                                u.tk_wait.list);
                list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
                list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
        }
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             99      100.00%   2        100.00%
Total                       99      100.00%   2        100.00%

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        __rpc_disable_timer(queue, task);
        if (RPC_IS_PRIORITY(queue))
                __rpc_remove_wait_queue_priority(task);
        list_del(&task->u.tk_wait.list);
        queue->qlen--;
        dprintk("RPC: %5u removed from queue %p \"%s\"\n",
                        task->tk_pid, queue, rpc_qname(queue));
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             62      91.18%    5        71.43%
Chuck Lever                 6       8.82%     2        28.57%
Total                       68      100.00%   7        100.00%


static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
        int i;

        spin_lock_init(&queue->lock);
        for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
                INIT_LIST_HEAD(&queue->tasks[i]);
        queue->maxpriority = nr_queues - 1;
        rpc_reset_waitqueue_priority(queue);
        queue->qlen = 0;
        setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
        INIT_LIST_HEAD(&queue->timer_list.list);
        rpc_assign_waitqueue_name(queue, qname);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             92      80.70%    5        62.50%
Linus Torvalds (pre-git)    21      18.42%    2        25.00%
Neil Brown                  1       0.88%     1        12.50%
Total                       114     100.00%   8        100.00%


void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             15      62.50%    2        66.67%
Linus Torvalds (pre-git)    9       37.50%    1        33.33%
Total                       24      100.00%   3        100.00%

EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, 1);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             21      87.50%    2        66.67%
Linus Torvalds (pre-git)    3       12.50%    1        33.33%
Total                       24      100.00%   3        100.00%

EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
        del_timer_sync(&queue->timer_list.timer);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             20      100.00%   2        100.00%
Total                       20      100.00%   2        100.00%

EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
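
A queue created with rpc_init_wait_queue() must be torn down with rpc_destroy_wait_queue() so the per-queue timer is quiesced before its memory goes away. A minimal usage sketch, with hypothetical example_* names:

/* Editor's sketch: lifetime of a single-level wait queue. */
static struct rpc_wait_queue example_wq;

static void example_setup(void)
{
        rpc_init_wait_queue(&example_wq, "example");    /* one priority level */
}

static void example_teardown(void)
{
        rpc_destroy_wait_queue(&example_wq);    /* del_timer_sync() on the queue timer */
}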

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
        freezable_schedule_unsafe();
        if (signal_pending_state(mode, current))
                return -ERESTARTSYS;
        return 0;
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             20      60.61%    1        25.00%
Peter Zijlstra              9       27.27%    1        25.00%
Neil Brown                  3       9.09%     1        25.00%
Matthew Wilcox              1       3.03%     1        25.00%
Total                       33      100.00%   4        100.00%

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
        static atomic_t rpc_pid;

        task->tk_pid = atomic_inc_return(&rpc_pid);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             25      100.00%   1        100.00%
Total                       25      100.00%   1        100.00%

#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task) { }

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             11      100.00%   1        100.00%
Total                       11      100.00%   1        100.00%

#endif

static void rpc_set_active(struct rpc_task *task)
{
        trace_rpc_task_begin(task->tk_client, task, NULL);

        rpc_task_set_debuginfo(task);
        set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             37      100.00%   5        100.00%
Total                       37      100.00%   5        100.00%

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
        void *m = &task->tk_runstate;
        wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
        struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
        unsigned long flags;
        int ret;

        trace_rpc_task_complete(task->tk_client, task, NULL);

        spin_lock_irqsave(&wq->lock, flags);
        clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
        ret = atomic_dec_and_test(&task->tk_count);
        if (waitqueue_active(wq))
                __wake_up_locked_key(wq, TASK_NORMAL, &k);
        spin_unlock_irqrestore(&wq->lock, flags);
        return ret;
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             120     100.00%   4        100.00%
Total                       120     100.00%   4        100.00%

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
        if (action == NULL)
                action = rpc_wait_bit_killable;
        return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
                        action, TASK_KILLABLE);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             36      92.31%    2        50.00%
Matthew Wilcox              2       5.13%     1        25.00%
Neil Brown                  1       2.56%     1        25.00%
Total                       39      100.00%   4        100.00%
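
Callers normally reach the function above through the rpc_wait_for_completion_task() wrapper from sched.h, which passes a NULL action and therefore gets the killable wait. A hedged usage sketch with a hypothetical caller:

/* Editor's sketch: run a task and block until rpc_complete_task()
 * clears RPC_TASK_ACTIVE; a fatal signal aborts the wait with -ERESTARTSYS. */
static int example_run_and_wait(struct rpc_task *task)
{
        int status;

        rpc_execute(task);
        status = rpc_wait_for_completion_task(task);
        if (status == 0)
                status = task->tk_status;
        rpc_put_task(task);
        return status;
}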

EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
                struct rpc_task *task)
{
        bool need_wakeup = !rpc_test_and_set_running(task);

        rpc_clear_queued(task);
        if (!need_wakeup)
                return;
        if (RPC_IS_ASYNC(task)) {
                INIT_WORK(&task->u.tk_work, rpc_async_schedule);
                queue_work(wq, &task->u.tk_work);
        } else
                wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             40      50.00%    4        44.44%
Linus Torvalds (pre-git)    35      43.75%    4        44.44%
Christophe Saout            5       6.25%     1        11.11%
Total                       80      100.00%   9        100.00%

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
                struct rpc_task *task,
                rpc_action action,
                unsigned char queue_priority)
{
        dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
                        task->tk_pid, rpc_qname(q), jiffies);

        trace_rpc_task_sleep(task->tk_client, task, q);

        __rpc_add_wait_queue(q, task, queue_priority);

        WARN_ON_ONCE(task->tk_callback != NULL);
        task->tk_callback = action;
        __rpc_add_timer(q, task);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Linus Torvalds (pre-git)    57      70.37%    6        50.00%
Trond Myklebust             22      27.16%    4        33.33%
Chuck Lever                 1       1.23%     1        8.33%
Weston Andros Adamson       1       1.23%     1        8.33%
Total                       81      100.00%   12       100.00%


void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                                rpc_action action)
{
        /* We shouldn't ever put an inactive task to sleep */
        WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
        if (!RPC_IS_ACTIVATED(task)) {
                task->tk_status = -EIO;
                rpc_put_task_async(task);
                return;
        }

        /*
         * Protect the queue operations.
         */
        spin_lock_bh(&q->lock);
        __rpc_sleep_on_priority(q, task, action, task->tk_priority);
        spin_unlock_bh(&q->lock);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Linus Torvalds (pre-git)    36      44.44%    3        37.50%
Weston Andros Adamson       24      29.63%    1        12.50%
Trond Myklebust             21      25.93%    4        50.00%
Total                       81      100.00%   8        100.00%

EXPORT_SYMBOL_GPL(rpc_sleep_on);
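
rpc_sleep_on() and rpc_wake_up_queued_task() (defined below) form the basic blocking primitive for RPC tasks. A minimal sketch of the pairing, assuming a hypothetical subsystem-private queue initialized elsewhere with rpc_init_wait_queue():

/* Editor's sketch of the sleep/wake pairing on a private queue. */
static struct rpc_wait_queue example_slot_queue;

/* Called from a task's state machine: park the task until a slot frees up. */
static void example_wait_for_slot(struct rpc_task *task)
{
        rpc_sleep_on(&example_slot_queue, task, NULL);  /* NULL action: just resume */
}

/* Called when a slot becomes available: let the task resume __rpc_execute(). */
static void example_slot_available(struct rpc_task *task)
{
        rpc_wake_up_queued_task(&example_slot_queue, task);
}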

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
                rpc_action action, int priority)
{
        /* We shouldn't ever put an inactive task to sleep */
        WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
        if (!RPC_IS_ACTIVATED(task)) {
                task->tk_status = -EIO;
                rpc_put_task_async(task);
                return;
        }

        /*
         * Protect the queue operations.
         */
        spin_lock_bh(&q->lock);
        __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
        spin_unlock_bh(&q->lock);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             60      71.43%    1        50.00%
Weston Andros Adamson       24      28.57%    1        50.00%
Total                       84      100.00%   2        100.00%

EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
                struct rpc_wait_queue *queue,
                struct rpc_task *task)
{
        dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
                        task->tk_pid, jiffies);

        /* Has the task been executed yet? If not, we cannot wake it up! */
        if (!RPC_IS_ACTIVATED(task)) {
                printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
                return;
        }

        trace_rpc_task_wakeup(task->tk_client, task, queue);

        __rpc_remove_wait_queue(queue, task);

        rpc_make_runnable(wq, task);

        dprintk("RPC: __rpc_wake_up_task done\n");
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Linus Torvalds (pre-git)    43      52.44%    2        28.57%
Trond Myklebust             37      45.12%    4        57.14%
Chuck Lever                 2       2.44%     1        14.29%
Total                       82      100.00%   7        100.00%

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
                struct rpc_wait_queue *queue, struct rpc_task *task)
{
        if (RPC_IS_QUEUED(task)) {
                smp_rmb();
                if (task->tk_waitqueue == queue)
                        __rpc_do_wake_up_task_on_wq(wq, queue, task);
        }
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             42      84.00%    5        71.43%
Linus Torvalds (pre-git)    8       16.00%    2        28.57%
Total                       50      100.00%   7        100.00%

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             25      100.00%   1        100.00%
Total                       25      100.00%   1        100.00%

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        spin_lock_bh(&queue->lock);
        rpc_wake_up_task_queue_locked(queue, task);
        spin_unlock_bh(&queue->lock);
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             38      100.00%   5        100.00%
Total                       38      100.00%   5        100.00%

EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
        struct list_head *q;
        struct rpc_task *task;

        /*
         * Service a batch of tasks from a single owner.
         */
        q = &queue->tasks[queue->priority];
        if (!list_empty(q)) {
                task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
                if (queue->owner == task->tk_owner) {
                        if (--queue->nr)
                                goto out;
                        list_move_tail(&task->u.tk_wait.list, q);
                }
                /*
                 * Check if we need to switch queues.
                 */
                goto new_owner;
        }

        /*
         * Service the next queue.
         */
        do {
                if (q == &queue->tasks[0])
                        q = &queue->tasks[queue->maxpriority];
                else
                        q = q - 1;
                if (!list_empty(q)) {
                        task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
                        goto new_queue;
                }
        } while (q != &queue->tasks[queue->priority]);

        rpc_reset_waitqueue_priority(queue);
        return NULL;

new_queue:
        rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
        rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
        return task;
}

Contributors

Person                      Tokens  Prop      Commits  Commit Prop
Trond Myklebust             211     90.95%    4        57.14%
Linus Torvalds (pre-git)    21      9.05%     3        42.86%
Total                       232     100.00%   7        100.00%


static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
        if (RPC_IS_PRIORITY(queue))
                return __rpc_find_next_queued_priority(queue);
        if (!list_empty(&queue->tasks[0]))
                return list_first_entry(&queue->tasks[0],
                                struct rpc_task, u.tk_wait.list);
        return NULL;
}