cregit-Linux: how code gets into the kernel

Release 4.12, block/cfq-iosched.c

/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"
#include "blk-wbt.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const u64 cfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const u64 cfq_slice_sync = NSEC_PER_SEC / 10;
static u64 cfq_slice_async = NSEC_PER_SEC / 25;
static const int cfq_slice_async_rq = 2;
static u64 cfq_slice_idle = NSEC_PER_SEC / 125;
static u64 cfq_group_idle = NSEC_PER_SEC / 125;
static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of queue service tree for idle class
 */
#define CFQ_IDLE_DELAY		(NSEC_PER_SEC / 5)
/* offset from end of group service tree under time slice mode */
#define CFQ_SLICE_MODE_GROUP_DELAY	(NSEC_PER_SEC / 5)
/* offset from end of group service under IOPS mode */
#define CFQ_IOPS_MODE_GROUP_DELAY	(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2 * NSEC_PER_SEC / HZ)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
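
CFQQ_SEEKY() reads the 32-bit seek_history as a shift register of distance samples: a queue is classified as seeky when more than 32/8 = 4 of its last 32 requests landed far from the previous one. A minimal sketch of how one sample would be shifted in (this helper is illustrative, not the file's actual update path):

/* illustrative only: shift one distance sample into the history word */
static void cfqq_seek_sample(u32 *seek_history, sector_t sdist)
{
	/* record a 1 when the request was more than CFQQ_SEEK_THR away */
	*seek_history = (*seek_history << 1) | (sdist > CFQQ_SEEK_THR);
}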


#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

/* blkio-related constants */
#define CFQ_WEIGHT_LEGACY_MIN	10
#define CFQ_WEIGHT_LEGACY_DFL	500
#define CFQ_WEIGHT_LEGACY_MAX	1000


struct cfq_ttime {
	u64 last_end_request;

	u64 ttime_total;
	u64 ttime_mean;
	unsigned long ttime_samples;
};
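
A sketch of how these fields are typically maintained: a decaying average in which each new thinktime sample replaces one eighth of the accumulated state, with samples scaled by 256 so that integer division keeps precision. The update rule below mirrors cfq's thinktime tracking elsewhere in the file, but the helper name and the clamp to 2 * slice_idle are illustrative here (div_u64()/div64_ul() come from <linux/math64.h>):

/* illustrative: fold one thinktime sample into the decaying average */
static void cfq_ttime_sample(struct cfq_ttime *ttime, u64 slice_idle)
{
	u64 elapsed = ktime_get_ns() - ttime->last_end_request;

	/* cap pathological gaps so one idle period can't dominate the mean */
	elapsed = min(elapsed, 2UL * slice_idle);

	ttime->ttime_samples = (7 * ttime->ttime_samples + 256) / 8;
	ttime->ttime_total = div_u64(7 * ttime->ttime_total + 256 * elapsed, 8);
	ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
				     ttime->ttime_samples);
}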

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */

struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	u64 min_vdisktime;
	struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = ktime_get_ns(),},}
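
A minimal sketch of the cached-leftmost pattern the comment above describes (helper names here are illustrative; the file's own helpers appear later). Peeking at the minimum becomes O(1) while the cache is warm, and the cache is invalidated only when the minimum itself is removed:

/* illustrative: O(1) "peek minimum", refreshed lazily on demand */
static struct rb_node *cfq_rb_root_first(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	return root->left;
}

/* illustrative: invalidate the cache when the leftmost node is erased */
static void cfq_rb_root_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase(n, &root->rb);
	RB_CLEAR_NODE(n);
	--root->count;
}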

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	u64 rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	u64 dispatch_start;
	u64 allocated_slice;
	u64 slice_dispatch;
	/* time when first request from queue completed and slice started. */
	u64 slice_start;
	u64 slice_end;
	s64 slice_resid;

	/* pending priority requests */
	int prio_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};
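
The paired counters queued[2] and allocated[2] are indexed by a request's sync-ness, so each direction is accounted separately. An illustrative accounting step (the helper is hypothetical; the real bookkeeping happens in the add/remove request paths later in the file):

/* illustrative: per-direction accounting keyed on rq_is_sync() */
static void cfqq_account_queued(struct cfq_queue *cfqq, struct request *rq)
{
	const int sync = rq_is_sync(rq);

	cfqq->queued[sync]++;
}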

/*
 * First index in the service_trees.
 * IDLE is handled separately, so it has negative index
 */
enum wl_class_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};


struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat		unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t			start_group_wait_time;
	uint64_t			start_idle_time;
	uint64_t			start_empty_time;
	uint16_t			flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};

/* Per-cgroup data */
struct cfq_group_data {
	/* must be the first member */
	struct blkcg_policy_data cpd;

	unsigned int weight;
	unsigned int leaf_weight;
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;

	/*
	 * The number of active cfqgs and sum of their weights under this
	 * cfqg.  This covers this cfqg's leaf_weight and all children's
	 * weights, but does not cover weights of further descendants.
	 *
	 * If a cfqg is on the service tree, it's active.  An active cfqg
	 * also activates its parent and contributes to the children_weight
	 * of the parent.
	 */
	int nr_active;
	unsigned int children_weight;

	/*
	 * vfraction is the fraction of vdisktime that the tasks in this
	 * cfqg are entitled to.  This is determined by compounding the
	 * ratios walking up from this cfqg to the root (a sketch of the
	 * calculation follows this struct).
	 *
	 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
	 * vfractions on a service tree is approximately 1.  The sum may
	 * deviate a bit due to rounding errors and fluctuations caused by
	 * cfqgs entering and leaving the service tree.
	 */
	unsigned int vfraction;

	/*
	 * There are two weights - (internal) weight is the weight of this
	 * cfqg against the sibling cfqgs.  leaf_weight is the weight of
	 * this cfqg against the child cfqgs.  For the root cfqg, both
	 * weights are kept in sync for backward compatibility.
	 */
	unsigned int weight;
	unsigned int new_weight;
	unsigned int dev_weight;

	unsigned int leaf_weight;
	unsigned int new_leaf_weight;
	unsigned int dev_leaf_weight;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	u64 saved_wl_slice;
	enum wl_type_t saved_wl_type;
	enum wl_class_t saved_wl_class;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
	struct cfq_ttime ttime;
	struct cfqg_stats stats;	/* stats for this cfqg */

	/* async queue for each priority case */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;
};
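
The vfraction compounding described above can be made concrete with a short sketch. The helper below is illustrative only: it is not part of this file, it assumes the group is active (children_weight != 0 at every level), and it reuses cfqg_parent(), which is defined further down. The scheduler performs the equivalent arithmetic while walking the service tree:

/* illustrative: compound per-level weight ratios into a fixed-point fraction */
static unsigned int cfqg_vfraction_sketch(struct cfq_group *cfqg)
{
	unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;	/* fixed-point 1.0 */
	struct cfq_group *pos = cfqg;
	struct cfq_group *parent;

	/* this cfqg's own tasks compete with its children via leaf_weight */
	vfr = vfr * pos->leaf_weight / pos->children_weight;

	/* compound the sibling ratio at each level up to the root */
	while ((parent = cfqg_parent(pos))) {
		vfr = vfr * pos->weight / parent->children_weight;
		pos = parent;
	}
	return vfr;
}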


struct cfq_io_cq {
	struct io_cq		icq;		/* must be the first member */
	struct cfq_queue	*cfqq[2];
	struct cfq_ttime	ttime;
	int			ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	uint64_t		blkcg_serial_nr; /* the current blkcg serial */
#endif
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group *root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_class_t serving_wl_class;
	enum wl_type_t serving_wl_type;
	u64 workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position.  These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).  A lookup
	 * sketch follows this struct.
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate (cfq will behave as if NCQ is present, to
	 *       allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct hrtimer idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_cq *active_cic;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_latency;
	u64 cfq_fifo_expire[2];
	u64 cfq_slice[2];
	u64 cfq_slice_idle;
	u64 cfq_group_idle;
	u64 cfq_target_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	u64 last_delayed_sync;
};
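
A sketch of how the prio_trees support the close-cooperator check (the helper name and shape are illustrative, not the file's actual lookup): queues are keyed by the sector of their next_request, so finding a queue issuing I/O near a given sector is an O(log n) descent, and the caller then applies CFQQ_CLOSE_THR to decide whether the two queues are really interleaving:

/* illustrative: descend a position-keyed prio tree toward @sector */
static struct cfq_queue *prio_tree_find_near(struct rb_root *root,
					     sector_t sector)
{
	struct rb_node *node = root->rb_node;
	struct cfq_queue *best = NULL;

	while (node) {
		struct cfq_queue *cfqq = rb_entry(node, struct cfq_queue,
						  p_node);
		sector_t pos = blk_rq_pos(cfqq->next_rq);

		best = cfqq;
		if (sector < pos)
			node = node->rb_left;
		else if (sector > pos)
			node = node->rb_right;
		else
			break;
	}
	/* caller still checks CFQQ_CLOSE_THR before treating @best as close */
	return best;
}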

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
static void cfq_put_queue(struct cfq_queue *cfqq);


static struct cfq_rb_root *st_for(struct cfq_group *cfqg, enum wl_class_t class,
				  enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (class == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[class][type];
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Corrado Zoccolo    33      62.26%   2        33.33%
Vivek Goyal        20      37.74%   4        66.67%
Total              53      100.00%  6        100.00%

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS

#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
	CFQG_stats_waiting = 0,
	CFQG_stats_idling,
	CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name)						\
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags |= (1 << CFQG_stats_##name);			\
}									\
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << CFQG_stats_##name);			\
}									\
static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
{									\
	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
}									\

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS

/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	cfqg_stats_clear_waiting(stats);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          60      100.00%  3        100.00%
Total              60      100.00%  3        100.00%

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
						 struct cfq_group *curr_cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_waiting(stats))
		return;
	if (cfqg == curr_cfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	cfqg_stats_mark_waiting(stats);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          53      100.00%  3        100.00%
Total              53      100.00%  3        100.00%
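
Each timed cfqg stat follows the same protocol: a "mark" helper records a start timestamp when one event occurs, and an "update" helper accumulates the delta and clears the flag on the opposite event. The demo function below is not part of the file; it only illustrates how the two helpers defined above pair up (the real call sites are later in the file):

/* illustrative pairing of the mark/update helpers defined above */
static void cfqg_stats_group_wait_demo(struct cfq_group *cfqg,
				       struct cfq_group *curr_cfqg)
{
	/* the group starts waiting for a timeslice */
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
	/* ... later, when the group is scheduled in ... */
	cfqg_stats_update_group_wait_time(&cfqg->stats);
}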

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	cfqg_stats_clear_empty(stats);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          60      100.00%  3        100.00%
Total              60      100.00%  3        100.00%


static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
	blkg_stat_add(&cfqg->stats.dequeue, 1);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          23      100.00%  3        100.00%
Total              23      100.00%  3        100.00%


static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (cfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	cfqg_stats_mark_empty(stats);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          51      96.23%   3        75.00%
Vivek Goyal        2       3.77%    1        25.00%
Total              53      100.00%  4        100.00%


static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		cfqg_stats_clear_idling(stats);
	}
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          68      100.00%  2        100.00%
Total              68      100.00%  2        100.00%


static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	BUG_ON(cfqg_stats_idling(stats));

	stats->start_idle_time = sched_clock();
	cfqg_stats_mark_idling(stats);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          41      100.00%  2        100.00%
Total              41      100.00%  2        100.00%


static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	cfqg_stats_update_group_wait_time(stats);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          52      100.00%  3        100.00%
Total              52      100.00%  3        100.00%

#else /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg) { }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          16      100.00%  3        100.00%
Total              16      100.00%  3        100.00%


static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          11      100.00%  3        100.00%
Total              11      100.00%  3        100.00%


static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          11      100.00%  3        100.00%
Total              11      100.00%  3        100.00%


static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          11      100.00%  3        100.00%
Total              11      100.00%  3        100.00%


static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          11      100.00%  3        100.00%
Total              11      100.00%  3        100.00%


static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          11      100.00%  3        100.00%
Total              11      100.00%  3        100.00%


static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          11      100.00%  3        100.00%
Total              11      100.00%  3        100.00%

#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Jens Axboe         29      100.00%  1        100.00%
Total              29      100.00%  1        100.00%


static struct cfq_group_data *cpd_to_cfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct cfq_group_data, cpd) : NULL;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Jens Axboe         27      96.43%   1        50.00%
Tejun Heo          1       3.57%    1        50.00%
Total              28      100.00%  2        100.00%


static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pd_to_blkg(&cfqg->pd);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Jens Axboe         23      100.00%  1        100.00%
Total              23      100.00%  1        100.00%

static struct blkcg_policy blkcg_policy_cfq;
static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          26      100.00%  1        100.00%
Total              26      100.00%  1        100.00%


static struct cfq_group_data *blkcg_to_cfqgd(struct blkcg *blkcg)
{
	return cpd_to_cfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_cfq));
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Arianna Avanzini   25      100.00%  1        100.00%
Total              25      100.00%  1        100.00%


static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
{
	struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;

	return pblkg ? blkg_to_cfqg(pblkg) : NULL;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          36      100.00%  2        100.00%
Total              36      100.00%  2        100.00%


static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
				      struct cfq_group *ancestor)
{
	return cgroup_is_descendant(cfqg_to_blkg(cfqg)->blkcg->css.cgroup,
				    cfqg_to_blkg(ancestor)->blkcg->css.cgroup);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Jan Kara           43      100.00%  1        100.00%
Total              43      100.00%  1        100.00%


static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          21      100.00%  1        100.00%
Total              21      100.00%  1        100.00%


static inline void cfqg_put(struct cfq_group *cfqg)
{
	return blkg_put(cfqg_to_blkg(cfqg));
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          21      100.00%  1        100.00%
Total              21      100.00%  1        100.00%

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
			  cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			  cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
			  __pbuf, ##args);				\
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
} while (0)
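
An illustrative call (the variables here are hypothetical; real call sites appear later in the file). The macro stamps a blktrace message with the queue's pid, sync/async flag, workload type and cgroup path:

	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);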
static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
					    struct cfq_group *curr_cfqg,
					    unsigned int op)
{
	blkg_rwstat_add(&cfqg->stats.queued, op, 1);
	cfqg_stats_end_empty_time(&cfqg->stats);
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          46      92.00%   4        66.67%
Michael Christie   3       6.00%    1        16.67%
Christoph Hellwig  1       2.00%    1        16.67%
Total              50      100.00%  6        100.00%


static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
						    uint64_t time,
						    unsigned long unaccounted_time)
{
	blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          47      97.92%   4        80.00%
Jeff Moyer         1       2.08%    1        20.00%
Total              48      100.00%  5        100.00%


static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
					       unsigned int op)
{
	blkg_rwstat_add(&cfqg->stats.queued, op, -1);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          27      87.10%   4        66.67%
Michael Christie   3       9.68%    1        16.67%
Christoph Hellwig  1       3.23%    1        16.67%
Total              31      100.00%  6        100.00%


static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
					       unsigned int op)
{
	blkg_rwstat_add(&cfqg->stats.merged, op, 1);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          26      86.67%   3        60.00%
Michael Christie   3       10.00%   1        20.00%
Christoph Hellwig  1       3.33%    1        20.00%
Total              30      100.00%  5        100.00%


static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
						uint64_t start_time,
						uint64_t io_start_time,
						unsigned int op)
{
	struct cfqg_stats *stats = &cfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, op, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time - start_time);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          80      93.02%   3        60.00%
Michael Christie   5       5.81%    1        20.00%
Christoph Hellwig  1       1.16%    1        20.00%
Total              86      100.00%  5        100.00%

/* @stats = 0 */
static void cfqg_stats_reset(struct cfqg_stats *stats)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_reset(&stats->unaccounted_time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          105     100.00%  2        100.00%
Total              105     100.00%  2        100.00%

/* @to += @from */
static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	blkg_stat_add_aux(&to->time, &from->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
	blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_add_aux(&to->avg_queue_size_samples,
			  &from->avg_queue_size_samples);
	blkg_stat_add_aux(&to->dequeue, &from->dequeue);
	blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_add_aux(&to->idle_time, &from->idle_time);
	blkg_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          165     100.00%  3        100.00%
Total              165     100.00%  3        100.00%

/*
 * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this cfqg after
 * it's gone.
 */
static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
{
	struct cfq_group *parent = cfqg_parent(cfqg);

	lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
	cfqg_stats_reset(&cfqg->stats);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          63      100.00%  3        100.00%
Total              63      100.00%  3        100.00%

#else /* CONFIG_CFQ_GROUP_IOSCHED */
static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
{
	return NULL;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          17      100.00%  2        100.00%
Total              17      100.00%  2        100.00%


static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
				      struct cfq_group *ancestor)
{
	return true;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Jan Kara           20      100.00%  1        100.00%
Total              20      100.00%  1        100.00%


static inline void cfqg_get(struct cfq_group *cfqg) { }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          11      100.00%  1        100.00%
Total              11      100.00%  1        100.00%


static inline void cfqg_put(struct cfq_group *cfqg) { }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Tejun Heo          11      100.00%  1        100.00%
Total              11      100.00%  1        100.00%

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid,	\
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
			##args)
#