cregit-Linux: how code gets into the kernel

Release 4.15 block/blk-sysfs.c

Directory: block
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"


struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};
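
Each file under /sys/block/<dev>/queue is backed by one of these entries, and sysfs resolves a read or write on the file to the entry's show or store callback. As a rough userspace analogy of that dispatch (not kernel code; all names here are illustrative stand-ins), it is a small vtable lookup:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Illustrative stand-ins for the kernel types; names are hypothetical. */
struct fake_queue { unsigned long nr_requests; };

struct fake_entry {
        const char *name;
        ssize_t (*show)(struct fake_queue *q, char *page);
};

static ssize_t nr_requests_show(struct fake_queue *q, char *page)
{
        return sprintf(page, "%lu\n", q->nr_requests);
}

static struct fake_entry entries[] = {
        { "nr_requests", nr_requests_show },
};

int main(void)
{
        struct fake_queue q = { .nr_requests = 128 };
        char page[64];

        /* sysfs performs the same lookup-and-dispatch via queue_attr_show() below. */
        for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
                if (!strcmp(entries[i].name, "nr_requests"))
                        entries[i].show(&q, page);

        fputs(page, stdout);    /* prints: 128 */
        return 0;
}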


static ssize_t queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      22   91.67%        1       50.00%
  Xiaotian Feng                    2    8.33%        1       50.00%
  Total                           24  100.00%        2      100.00%


static ssize_t queue_var_store(unsigned long *var, const char *page,
                               size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      29   50.00%        1       33.33%
  Dave Reisner                    28   48.28%        1       33.33%
  Jingoo Han                       1    1.72%        1       33.33%
  Total                           58  100.00%        3      100.00%
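
queue_var_store() accepts any base-10 value that fits in an unsigned int and rejects everything else; on success it returns count (the number of bytes consumed), which is what a sysfs store method must return. A minimal userspace sketch of the same parse-and-validate step, using strtoul in place of the kernel's stricter kstrtoul:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Mimics queue_var_store()'s validation: base 10, must fit in unsigned int. */
static int parse_queue_var(const char *page, unsigned long *var)
{
        char *end;
        unsigned long v;

        errno = 0;
        v = strtoul(page, &end, 10);
        if (errno || end == page || v > UINT_MAX)
                return -EINVAL;

        *var = v;
        return 0;
}

int main(void)
{
        unsigned long v;

        printf("%d\n", parse_queue_var("512\n", &v));            /* 0: accepted */
        printf("%d\n", parse_queue_var("99999999999999", &v));   /* -EINVAL */
        return 0;
}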


static ssize_t queue_var_store64(s64 *var, const char *page)
{
        int err;
        s64 v;

        err = kstrtos64(page, 10, &v);
        if (err < 0)
                return err;

        *var = v;
        return 0;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      50  100.00%        2      100.00%
  Total                           50  100.00%        2      100.00%


static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, (page));
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      27  100.00%        1      100.00%
  Total                           27  100.00%        1      100.00%


static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!q->request_fn && !q->mq_ops)
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        if (q->request_fn)
                err = blk_update_nr_requests(q, nr);
        else
                err = blk_mq_update_nr_requests(q, nr);

        if (err)
                return err;

        return ret;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      99   90.00%        3       50.00%
  Dave Reisner                     9    8.18%        1       16.67%
  Tao Ma                           1    0.91%        1       16.67%
  Tejun Heo                        1    0.91%        1       16.67%
  Total                          110  100.00%        6      100.00%


static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb = q->backing_dev_info->ra_pages <<
                                        (PAGE_SHIFT - 10);

        return queue_var_show(ra_kb, (page));
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      37   90.24%        1       25.00%
  Xiaotian Feng                    2    4.88%        1       25.00%
  Kirill A. Shutemov               1    2.44%        1       25.00%
  Jan Kara                         1    2.44%        1       25.00%
  Total                           41  100.00%        4      100.00%


static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);

        if (ret < 0)
                return ret;

        q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

        return ret;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      51   82.26%        1       25.00%
  Dave Reisner                     9   14.52%        1       25.00%
  Jan Kara                         1    1.61%        1       25.00%
  Kirill A. Shutemov               1    1.61%        1       25.00%
  Total                           62  100.00%        4      100.00%
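
The shift by (PAGE_SHIFT - 10) converts between kibibytes and pages: with 4 KiB pages PAGE_SHIFT is 12, so the conversion factor is 2^(12-10) = 4 KiB per page. A quick check of the round trip:

#include <stdio.h>

int main(void)
{
        const unsigned int page_shift = 12;     /* 4 KiB pages */
        unsigned long ra_kb = 128;              /* the common readahead default, 128 KiB */

        unsigned long ra_pages = ra_kb >> (page_shift - 10);   /* KiB -> pages */
        unsigned long back_kb = ra_pages << (page_shift - 10); /* pages -> KiB */

        printf("%lu KiB = %lu pages = %lu KiB\n", ra_kb, ra_pages, back_kb);
        /* prints: 128 KiB = 32 pages = 128 KiB */
        return 0;
}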


static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, (page));
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      32   91.43%        1       50.00%
  Martin K. Petersen               3    8.57%        1       50.00%
  Total                           35  100.00%        2      100.00%


static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), (page));
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Martin K. Petersen              28  100.00%        1      100.00%
  Total                           28  100.00%        1      100.00%


static ssize_t queue_max_discard_segments_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_max_discard_segments(q), (page));
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Christoph Hellwig               28  100.00%        1      100.00%
  Total                           28  100.00%        1      100.00%


static ssize_t queue_max_integrity_segments_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, (page));
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Martin K. Petersen              29  100.00%        1      100.00%
  Total                           29  100.00%        1      100.00%


static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        if (blk_queue_cluster(q))
                return queue_var_show(queue_max_segment_size(q), (page));

        return queue_var_show(PAGE_SIZE, (page));
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Martin K. Petersen              44   97.78%        2       66.67%
  Kirill A. Shutemov               1    2.22%        1       33.33%
  Total                           45  100.00%        3      100.00%


static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Martin K. Petersen              26  100.00%        2      100.00%
  Total                           26  100.00%        2      100.00%


static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Martin K. Petersen              26  100.00%        1      100.00%
  Total                           26  100.00%        1      100.00%


static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.chunk_sectors, page);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Hannes Reinecke                 27  100.00%        1      100.00%
  Total                           27  100.00%        1      100.00%


static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Martin K. Petersen              26  100.00%        1      100.00%
  Total                           26  100.00%        1      100.00%


static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Martin K. Petersen              26  100.00%        2      100.00%
  Total                           26  100.00%        2      100.00%


static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Martin K. Petersen              27  100.00%        1      100.00%
  Total                           27  100.00%        1      100.00%


static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      27   75.00%        1       50.00%
  Alan One Thousand Gnomes         9   25.00%        1       50.00%
  Total                           36  100.00%        2      100.00%


static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_discard_sectors << 9);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Martin K. Petersen              36  100.00%        2      100.00%
  Total                           36  100.00%        2      100.00%


static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard;
        ssize_t ret = queue_var_store(&max_discard, page, count);

        if (ret < 0)
                return ret;

        if (max_discard & (q->limits.discard_granularity - 1))
                return -EINVAL;

        max_discard >>= 9;
        if (max_discard > UINT_MAX)
                return -EINVAL;

        if (max_discard > q->limits.max_hw_discard_sectors)
                max_discard = q->limits.max_hw_discard_sectors;

        q->limits.max_discard_sectors = max_discard;

        return ret;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                     106  100.00%        1      100.00%
  Total                          106  100.00%        1      100.00%
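
The granularity check relies on discard_granularity being a power of two: x & (g - 1) equals x % g only in that case. A small demonstration of the test (the values here are made up):

#include <stdio.h>

/* Returns nonzero when bytes is not aligned to the (power-of-two) granularity,
 * mirroring the max_discard & (discard_granularity - 1) test above. */
static unsigned long misaligned(unsigned long bytes, unsigned long granularity)
{
        return bytes & (granularity - 1);
}

int main(void)
{
        unsigned long granularity = 4096;       /* hypothetical discard granularity */

        printf("%lu\n", misaligned(1UL << 20, granularity)); /* 0: 1 MiB is aligned */
        printf("%lu\n", misaligned(6000, granularity));      /* nonzero: rejected with -EINVAL */
        return 0;
}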


static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Martin K. Petersen              22   95.65%        1       50.00%
  Christoph Hellwig                1    4.35%        1       50.00%
  Total                           23  100.00%        2      100.00%


static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_write_same_sectors << 9);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Martin K. Petersen              36  100.00%        1      100.00%
  Total                           36  100.00%        1      100.00%


static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Chaitanya Kulkarni              36  100.00%        1      100.00%
  Total                           36  100.00%        1      100.00%


static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (ret < 0)
                return ret;

        max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
                                         q->limits.max_dev_sectors >> 1);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                     100   72.46%        2       25.00%
  Martin K. Petersen              22   15.94%        2       25.00%
  Dave Reisner                     9    6.52%        1       12.50%
  Nikanth Karthikesan              5    3.62%        1       12.50%
  Kirill A. Shutemov               1    0.72%        1       12.50%
  Jan Kara                         1    0.72%        1       12.50%
  Total                          138  100.00%        8      100.00%


static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, (page));
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      32   91.43%        1       50.00%
  Martin K. Petersen               3    8.57%        1       50.00%
  Total                           35  100.00%        2      100.00%

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
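
For reference, the first instantiation generates a queue_show_nonrot()/queue_store_nonrot() pair in which the neg = 1 argument inverts the flag, so the sysfs "rotational" file reads 1 exactly when QUEUE_FLAG_NONROT is clear. The show half, expanded by hand:

/* Hand-expanded from QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1), for illustration. */
static ssize_t queue_show_nonrot(struct request_queue *q, char *page)
{
        int bit;

        bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);
        /* neg == 1, so "neg ? !bit : bit" constant-folds to the inverted bit */
        return queue_var_show(!bit, page);
}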
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
        switch (blk_queue_zoned_model(q)) {
        case BLK_ZONED_HA:
                return sprintf(page, "host-aware\n");
        case BLK_ZONED_HM:
                return sprintf(page, "host-managed\n");
        default:
                return sprintf(page, "none\n");
        }
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Damien Le Moal                  55  100.00%        1      100.00%
  Total                           55  100.00%        1      100.00%


static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Alan D. Brunelle                35  100.00%        2      100.00%
  Total                           35  100.00%        2      100.00%


static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Alan D. Brunelle                78   77.23%        2       50.00%
  Jens Axboe                      14   13.86%        1       25.00%
  Dave Reisner                     9    8.91%        1       25.00%
  Total                          101  100.00%        4      100.00%


static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      35   68.63%        1       33.33%
  Dan J Williams                  15   29.41%        1       33.33%
  Xiaotian Feng                    1    1.96%        1       33.33%
  Total                           51  100.00%        3      100.00%
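
The expression set << force packs the two flags into the single value userspace sees: 0 (off), 1 (complete in the submitter's CPU group), or 2 (force completion on the submitting CPU). Enumerating the encoding:

#include <stdio.h>

int main(void)
{
        /* All combinations of QUEUE_FLAG_SAME_COMP (set) and
         * QUEUE_FLAG_SAME_FORCE (force), as reported by rq_affinity. */
        for (int set = 0; set <= 1; set++)
                for (int force = 0; force <= 1; force++)
                        printf("set=%d force=%d -> %d\n", set, force, set << force);
        /* set=1 force=0 -> 1; set=1 force=1 -> 2; force without set still reads 0 */
        return 0;
}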


static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        if (val == 2) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
        spin_unlock_irq(q->queue_lock);
#endif
        return ret;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      78   55.71%        1       20.00%
  Eric Seppanen                   27   19.29%        1       20.00%
  Dan J Williams                  23   16.43%        1       20.00%
  Dave Reisner                     9    6.43%        1       20.00%
  Christoph Hellwig                3    2.14%        1       20.00%
  Total                          140  100.00%        5      100.00%


static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
        int val;

        if (q->poll_nsec == -1)
                val = -1;
        else
                val = q->poll_nsec / 1000;

        return sprintf(page, "%d\n", val);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      51  100.00%        3      100.00%
  Total                           51  100.00%        3      100.00%


static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
                                size_t count)
{
        int err, val;

        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        err = kstrtoint(page, 10, &val);
        if (err < 0)
                return err;

        if (val == -1)
                q->poll_nsec = -1;
        else
                q->poll_nsec = val * 1000;

        return count;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      89  100.00%        3      100.00%
  Total                           89  100.00%        3      100.00%


static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      31  100.00%        1      100.00%
  Total                           31  100.00%        1      100.00%


static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        unsigned long poll_on;
        ssize_t ret;

        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        ret = queue_var_store(&poll_on, page, count);
        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        if (poll_on)
                queue_flag_set(QUEUE_FLAG_POLL, q);
        else
                queue_flag_clear(QUEUE_FLAG_POLL, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                     101  100.00%        1      100.00%
  Total                          101  100.00%        1      100.00%


static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
        if (!q->rq_wb)
                return -EINVAL;

        return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      45  100.00%        1      100.00%
  Total                           45  100.00%        1      100.00%


static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        struct rq_wb *rwb;
        ssize_t ret;
        s64 val;

        ret = queue_var_store64(&val, page);
        if (ret < 0)
                return ret;
        if (val < -1)
                return -EINVAL;

        rwb = q->rq_wb;
        if (!rwb) {
                ret = wbt_init(q);
                if (ret)
                        return ret;
        }

        rwb = q->rq_wb;
        if (val == -1)
                rwb->min_lat_nsec = wbt_default_latency_nsec(q);
        else if (val >= 0)
                rwb->min_lat_nsec = val * 1000ULL;

        if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
                rwb->enable_state = WBT_STATE_ON_MANUAL;

        wbt_update_limits(rwb);
        return count;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                     145   99.32%        3       75.00%
  weiping zhang                    1    0.68%        1       25.00%
  Total                          146  100.00%        4      100.00%


static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return sprintf(page, "write back\n");

        return sprintf(page, "write through\n");
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      43  100.00%        1      100.00%
  Total                           43  100.00%        1      100.00%


static ssize_t queue_wc_store(struct request_queue *q, const char *page,
                              size_t count)
{
        int set = -1;

        if (!strncmp(page, "write back", 10))
                set = 1;
        else if (!strncmp(page, "write through", 13) ||
                 !strncmp(page, "none", 4))
                set = 0;

        if (set == -1)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        if (set)
                queue_flag_set(QUEUE_FLAG_WC, q);
        else
                queue_flag_clear(QUEUE_FLAG_WC, q);
        spin_unlock_irq(q->queue_lock);

        return count;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                     115  100.00%        1      100.00%
  Total                          115  100.00%        1      100.00%
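
Because the comparison uses strncmp with a fixed length, only a prefix match is required: any input beginning with "write back" is accepted, including the usual newline-terminated output of echo. A quick illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* What `echo "write back" > .../queue/write_cache` actually delivers. */
        const char *page = "write back\n";

        /* Prefix match, as in queue_wc_store(): length 10 ignores the '\n'. */
        printf("%s\n", strncmp(page, "write back", 10) == 0 ? "accepted"
                                                            : "rejected");
        return 0;
}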


static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_dax(q), page);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Yigal Korman                    26  100.00%        1      100.00%
  Total                           26  100.00%        1      100.00%

static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
        .store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
        .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_ra_show,
        .store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
        .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_max_sectors_show,
        .store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
        .show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
        .attr = {.name = "max_segments", .mode = S_IRUGO },
        .show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
        .attr = {.name = "max_discard_segments", .mode = S_IRUGO },
        .show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
        .attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
        .show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
        .attr = {.name = "max_segment_size", .mode = S_IRUGO },
        .show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
        .store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
        .attr = {.name = "logical_block_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
        .attr = {.name = "physical_block_size", .mode = S_IRUGO },
        .show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
        .attr = {.name = "chunk_sectors", .mode = S_IRUGO },
        .show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
        .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
        .show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
        .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
        .show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
        .attr = {.name = "discard_granularity", .mode = S_IRUGO },
        .show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
        .attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
        .show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
        .attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
        .show = queue_discard_max_show,
        .store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
        .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
        .show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
        .attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
        .show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
        .attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
        .show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
        .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_nonrot,
        .store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
        .attr = {.name = "zoned", .mode = S_IRUGO },
        .show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
        .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
        .show = queue_nomerges_show,
        .store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
        .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
        .show = queue_rq_affinity_show,
        .store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
        .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_iostats,
        .store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
        .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_random,
        .store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
        .attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
        .show = queue_poll_show,
        .store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
        .attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
        .show = queue_poll_delay_show,
        .store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
        .attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
        .show = queue_wc_show,
        .store = queue_wc_store,
};

static struct queue_sysfs_entry queue_dax_entry = {
        .attr = {.name = "dax", .mode = S_IRUGO },
        .show = queue_dax_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
        .attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
        .show = queue_wb_lat_show,
        .store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
        .attr = {.name = "throttle_sample_time", .mode = S_IRUGO | S_IWUSR },
        .show = blk_throtl_sample_time_show,
        .store = blk_throtl_sample_time_store,
};
#endif

static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_discard_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_chunk_sectors_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_write_zeroes_max_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_zoned_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
        &queue_dax_entry.attr,
        &queue_wb_lat_entry.attr,
        &queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        &throtl_sample_time_entry.attr,
#endif
        NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                     109   99.09%        1       50.00%
  Bart Van Assche                  1    0.91%        1       50.00%
  Total                          110  100.00%        2      100.00%


static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                 const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                     117   99.15%        2       66.67%
  Bart Van Assche                  1    0.85%        1       33.33%
  Total                          118  100.00%        3      100.00%
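
Both dispatchers recover the containing objects from the embedded struct attribute and struct kobject via container_of(), which subtracts the member's offset from the member's address. A self-contained userspace illustration of the same pattern (the struct names here are stand-ins):

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(), minus the type checking. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct attribute { const char *name; };

struct queue_entry {
        int payload;
        struct attribute attr;  /* embedded member, as in queue_sysfs_entry */
};

int main(void)
{
        struct queue_entry e = { .payload = 7, .attr = { "nr_requests" } };
        struct attribute *attr = &e.attr;       /* what sysfs hands back */

        /* Recover the outer object, exactly as to_queue() does. */
        struct queue_entry *entry = container_of(attr, struct queue_entry, attr);
        printf("%d %s\n", entry->payload, entry->attr.name); /* 7 nr_requests */
        return 0;
}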


static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
        struct request_queue *q = container_of(rcu_head, struct request_queue,
                                               rcu_head);
        kmem_cache_free(blk_requestq_cachep, q);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Tejun Heo                       33  100.00%        1      100.00%
  Total                           33  100.00%        1      100.00%
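
The queue is freed through call_rcu() (see __blk_release_queue() below), so readers that found it under rcu_read_lock() never see its memory disappear before their grace period ends. The general deferred-free pattern, as a minimal kernel-style sketch (a hypothetical object type, not this file's code):

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical RCU-protected object. */
struct my_obj {
        int data;
        struct rcu_head rcu;    /* storage the RCU callback hangs off */
};

static void my_obj_free_rcu(struct rcu_head *head)
{
        /* Runs after a grace period; no pre-existing reader can still see it. */
        kfree(container_of(head, struct my_obj, rcu));
}

static void my_obj_release(struct my_obj *obj)
{
        /* Defer the actual kfree() until all current RCU readers are done. */
        call_rcu(&obj->rcu, my_obj_free_rcu);
}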

/**
 * __blk_release_queue - release a request queue when it is no longer needed
 * @work: pointer to the release_work member of the request queue to be released
 *
 * Description:
 *     blk_release_queue is the counterpart of blk_init_queue(). It should be
 *     called when a request queue is being released; typically when a block
 *     device is being de-registered. Its primary task is to free the queue
 *     itself.
 *
 * Notes:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 *
 *     Although blk_release_queue() may be called with preemption disabled,
 *     __blk_release_queue() may sleep.
 */
static void __blk_release_queue(struct work_struct *work)
{
        struct request_queue *q = container_of(work, typeof(*q), release_work);

        if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
                blk_stat_remove_callback(q, q->poll_cb);
        blk_stat_free_callback(q->poll_cb);
        bdi_put(q->backing_dev_info);
        blkcg_exit_queue(q);

        if (q->elevator) {
                ioc_clear_queue(q);
                elevator_exit(q, q->elevator);
        }

        blk_free_queue_stats(q->stats);

        blk_exit_rl(q, &q->root_rl);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        if (!q->mq_ops) {
                if (q->exit_rq_fn)
                        q->exit_rq_fn(q, q->fq->flush_rq);
                blk_free_flush_queue(q->fq);
        } else {
                blk_mq_release(q);
        }

        blk_trace_shutdown(q);

        if (q->mq_ops)
                blk_mq_debugfs_unregister(q);

        if (q->bio_split)
                bioset_free(q->bio_split);

        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      51   24.17%        2        9.09%
  Omar Sandoval                   48   22.75%        3       13.64%
  Tejun Heo                       38   18.01%        7       31.82%
  Christoph Hellwig               28   13.27%        2        9.09%
  Kent Overstreet                 13    6.16%        1        4.55%
  Bart Van Assche                 13    6.16%        3       13.64%
  Hannes Reinecke                 11    5.21%        1        4.55%
  Ming Lei                         8    3.79%        2        9.09%
  Jan Kara                         1    0.47%        1        4.55%
  Total                          211  100.00%       22      100.00%


static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        INIT_WORK(&q->release_work, __blk_release_queue);
        schedule_work(&q->release_work);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Bart Van Assche                 44  100.00%        1      100.00%
  Total                           44  100.00%        1      100.00%

static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = blk_release_queue,
};
int blk_register_queue(struct gendisk *disk)
{
        int ret;
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return -ENXIO;

        WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
                  "%s is registering an already registered queue\n",
                  kobject_name(&dev->kobj));
        queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices.  Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved.  To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
                blk_queue_bypass_end(q);
        }

        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;

        /* Prevent changes through sysfs until registration is completed. */
        mutex_lock(&q->sysfs_lock);

        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
        if (ret < 0) {
                blk_trace_remove_sysfs(dev);
                goto unlock;
        }

        if (q->mq_ops) {
                __blk_mq_register_dev(dev, q);
                blk_mq_debugfs_register(q);
        }

        kobject_uevent(&q->kobj, KOBJ_ADD);

        wbt_enable_default(q);

        blk_throtl_register_queue(q);

        if (q->request_fn || (q->mq_ops && q->elevator)) {
                ret = elv_register_queue(q);
                if (ret) {
                        kobject_uevent(&q->kobj, KOBJ_REMOVE);
                        kobject_del(&q->kobj);
                        blk_trace_remove_sysfs(dev);
                        kobject_put(&dev->kobj);
                        goto unlock;
                }
        }
        ret = 0;
unlock:
        mutex_unlock(&q->sysfs_lock);
        return ret;
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                     106   36.81%        3       13.04%
  Omar Sandoval                   47   16.32%        3       13.04%
  Tahsin Erdogan                  30   10.42%        1        4.35%
  Li Zefan                        25    8.68%        1        4.35%
  Bart Van Assche                 15    5.21%        2        8.70%
  Tejun Heo                       12    4.17%        3       13.04%
  Alan Stern                      10    3.47%        1        4.35%
  Akinobu Mita                     8    2.78%        1        4.35%
  Liu Yuan                         8    2.78%        2        8.70%
  Xiaotian Feng                    8    2.78%        1        4.35%
  Dan J Williams                   5    1.74%        1        4.35%
  Shaohua Li                       5    1.74%        1        4.35%
  Martin K. Petersen               4    1.39%        1        4.35%
  Zdenek Kabelac                   4    1.39%        1        4.35%
  Jan Kara                         1    0.35%        1        4.35%
  Total                          288  100.00%       23      100.00%
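
blk_register_queue() is normally reached from device_add_disk() rather than called directly by drivers. A rough sketch of the driver-side sequence that triggers it in this era of the kernel (error handling, major number, and fops setup omitted; all my_* names are placeholders):

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Hypothetical bio-based driver setup; heavily abbreviated. */
static int my_driver_probe(void)
{
        struct request_queue *q;
        struct gendisk *disk;

        q = blk_alloc_queue(GFP_KERNEL);        /* starts in the degraded mode above */
        blk_queue_make_request(q, my_make_request);     /* my_make_request: placeholder */

        disk = alloc_disk(1);
        disk->queue = q;
        disk->fops = &my_fops;                  /* my_fops: placeholder block_device_operations */
        strcpy(disk->disk_name, "myblk0");

        /* device_add_disk() -> blk_register_queue(): the "queue" kobject and
         * all of the attributes above appear under /sys/block/myblk0/queue. */
        device_add_disk(my_parent_dev, disk);   /* my_parent_dev: placeholder */
        return 0;
}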


void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        mutex_lock(&q->sysfs_lock);
        queue_flag_clear_unlocked(QUEUE_FLAG_REGISTERED, q);
        mutex_unlock(&q->sysfs_lock);

        wbt_exit(q);

        if (q->mq_ops)
                blk_mq_unregister_dev(disk_to_dev(disk), q);

        if (q->request_fn || (q->mq_ops && q->elevator))
                elv_unregister_queue(q);

        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(disk_to_dev(disk));
        kobject_put(&disk_to_dev(disk)->kobj);
}

Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                      67   51.54%        2       20.00%
  Omar Sandoval                   22   16.92%        3       30.00%
  David Jeffery                   16   12.31%        1       10.00%
  Zdenek Kabelac                   8    6.15%        1       10.00%
  Akinobu Mita                     8    6.15%        1       10.00%
  Matias Björling                  6    4.62%        1       10.00%
  Tejun Heo                        3    2.31%        1       10.00%
  Total                          130  100.00%       10      100.00%


Overall Contributors

  Person                      Tokens     Prop  Commits  Commit Prop
  Jens Axboe                    2456   55.10%       20       18.52%
  Martin K. Petersen             742   16.65%       13       12.04%
  Alan D. Brunelle               152    3.41%        2        1.85%
  Omar Sandoval                  120    2.69%        8        7.41%
  Tejun Heo                       97    2.18%       15       13.89%
  Christoph Hellwig               92    2.06%        5        4.63%
  Damien Le Moal                  87    1.95%        1        0.93%
  Bart Van Assche                 75    1.68%        6        5.56%
  Dave Reisner                    73    1.64%        1        0.93%
  Hannes Reinecke                 70    1.57%        2        1.85%
  Chaitanya Kulkarni              68    1.53%        1        0.93%
  Yigal Korman                    58    1.30%        1        0.93%
  Shaohua Li                      54    1.21%        2        1.85%
  Bartlomiej Zolnierkiewicz       54    1.21%        1        0.93%
  Dan J Williams                  43    0.96%        2        1.85%
  Tahsin Erdogan                  30    0.67%        1        0.93%
  Eric Seppanen                   27    0.61%        1        0.93%
  Li Zefan                        25    0.56%        1        0.93%
  David Jeffery                   16    0.36%        1        0.93%
  Akinobu Mita                    16    0.36%        1        0.93%
  Kent Overstreet                 13    0.29%        1        0.93%
  Xiaotian Feng                   13    0.29%        2        1.85%
  Zdenek Kabelac                  12    0.27%        1        0.93%
  Alan Stern                      10    0.22%        1        0.93%
  Alan One Thousand Gnomes         9    0.20%        1        0.93%
  Ming Lei                         8    0.18%        2        1.85%
  Liu Yuan                         8    0.18%        2        1.85%
  Matias Björling                  6    0.13%        1        0.93%
  Nikanth Karthikesan              5    0.11%        1        0.93%
  Jan Kara                         5    0.11%        3        2.78%
  Kirill A. Shutemov               4    0.09%        1        0.93%
  Lei Ming                         3    0.07%        1        0.93%
  Emese Revfy                      1    0.02%        1        0.93%
  weiping zhang                    1    0.02%        1        0.93%
  Greg Kroah-Hartman               1    0.02%        1        0.93%
  Jingoo Han                       1    0.02%        1        0.93%
  Tao Ma                           1    0.02%        1        0.93%
  Arnd Bergmann                    1    0.02%        1        0.93%
  Total                         4457  100.00%      108      100.00%
Created with cregit.