Contributors: 24
Author                    Tokens  Token Proportion  Commits  Commit Proportion
Jens Axboe                   130            25.59%        5             12.82%
Christof Schmitt              56            11.02%        1              2.56%
Alan D. Brunelle              52            10.24%        1              2.56%
Jan Kara                      49             9.65%        3              7.69%
Christoph Hellwig             45             8.86%        4             10.26%
Shaohua Li                    42             8.27%        2              5.13%
Arnd Bergmann                 33             6.50%        2              5.13%
Stefan Raspl                  26             5.12%        1              2.56%
Li Zefan                      19             3.74%        1              2.56%
Martin K. Petersen            10             1.97%        2              5.13%
yu kuai                        8             1.57%        1              2.56%
Arnaldo Carvalho de Melo       5             0.98%        2              5.13%
Stephen Rothwell               5             0.98%        1              2.56%
Bart Van Assche                5             0.98%        1              2.56%
Martin Schwidefsky             5             0.98%        2              5.13%
Andrew Morton                  4             0.79%        2              5.13%
David Howells                  3             0.59%        1              2.56%
Tejun Heo                      2             0.39%        1              2.56%
Shawn Du                       2             0.39%        1              2.56%
Jaswinder Singh Rajput         2             0.39%        1              2.56%
Pavel Machek                   2             0.39%        1              2.56%
Vivek Goyal                    1             0.20%        1              2.56%
Greg Kroah-Hartman             1             0.20%        1              2.56%
Chen Gang S                    1             0.20%        1              2.56%
Total                        508                         39


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLKTRACE_H
#define BLKTRACE_H

#include <linux/blk-mq.h>
#include <linux/relay.h>
#include <linux/compat.h>
#include <uapi/linux/blktrace_api.h>
#include <linux/list.h>
#include <linux/blk_types.h>

#if defined(CONFIG_BLK_DEV_IO_TRACE)

#include <linux/sysfs.h>

struct blk_trace {
	int trace_state;			/* Blktrace_setup/running/stopped */
	struct rchan *rchan;			/* relay channel carrying trace records */
	unsigned long __percpu *sequence;	/* per-cpu event sequence counters */
	unsigned char __percpu *msg_data;	/* per-cpu buffers for trace messages */
	u16 act_mask;				/* BLK_TC_* categories to trace */
	u64 start_lba;				/* only trace I/O in [start_lba, end_lba] */
	u64 end_lba;
	u32 pid;				/* pid filter; 0 traces all tasks */
	u32 dev;				/* device being traced */
	struct dentry *dir;			/* per-device debugfs directory */
	struct list_head running_list;		/* node on the list of running traces */
	atomic_t dropped;			/* events dropped on relay buffer overflow */
};

extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
__printf(3, 4) void __blk_trace_note_message(struct blk_trace *bt,
		struct cgroup_subsys_state *css, const char *fmt, ...);

/**
 * blk_add_trace_msg - Add a (simple) message to the blktrace stream
 * @q:		queue the io is for
 * @fmt:	format to print message in
 * args...	Variable argument list for format
 *
 * Description:
 *     Records a (simple) message onto the blktrace stream.
 *
 *     NOTE: BLK_TN_MAX_MSG characters are output at most.
 *     NOTE: Can not use 'static inline' due to presence of var args...
 *
 **/
#define blk_add_cgroup_trace_msg(q, css, fmt, ...)			\
	do {								\
		struct blk_trace *bt;					\
									\
		rcu_read_lock();					\
		bt = rcu_dereference((q)->blk_trace);			\
		if (unlikely(bt))					\
			__blk_trace_note_message(bt, css, fmt, ##__VA_ARGS__);\
		rcu_read_unlock();					\
	} while (0)
#define blk_add_trace_msg(q, fmt, ...)					\
	blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
#define BLK_TN_MAX_MSG		128
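
/*
 * Example (editorial sketch, not part of the original header): a block
 * layer path or driver can drop short annotations into an active trace
 * with blk_add_trace_msg(). The wrapper function below is hypothetical;
 * the call itself mirrors existing in-tree users.
 *
 *	static void example_note_requeue(struct request_queue *q,
 *					 struct request *rq)
 *	{
 *		blk_add_trace_msg(q, "requeue sector %llu",
 *				  (unsigned long long)blk_rq_pos(rq));
 *	}
 *
 * The message is dropped when no trace is attached to @q and is limited
 * to BLK_TN_MAX_MSG characters.
 */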

static inline bool blk_trace_note_message_enabled(struct request_queue *q)
{
	struct blk_trace *bt;
	bool ret;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
	rcu_read_unlock();
	return ret;
}
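
/*
 * Example (editorial sketch): blk_trace_note_message_enabled() lets callers
 * avoid building a message at all when BLK_TC_NOTIFY tracing is not active
 * on the queue. The variable 'depth' below is hypothetical.
 *
 *	if (blk_trace_note_message_enabled(q))
 *		blk_add_trace_msg(q, "queue depth changed to %u", depth);
 */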

extern void blk_add_driver_data(struct request *rq, void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			   struct block_device *bdev,
			   char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);
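
/*
 * Editorial sketch of how the helpers above fit together: blk_trace_ioctl()
 * is reached from the block device ioctl path and, roughly, maps the
 * BLKTRACE* commands from the uapi header onto these functions. Simplified:
 *
 *	switch (cmd) {
 *	case BLKTRACESETUP:
 *		ret = blk_trace_setup(q, name, dev, bdev, arg);
 *		break;
 *	case BLKTRACESTART:
 *	case BLKTRACESTOP:
 *		ret = blk_trace_startstop(q, cmd == BLKTRACESTART);
 *		break;
 *	case BLKTRACETEARDOWN:
 *		ret = blk_trace_remove(q);
 *		break;
 *	}
 */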

#else /* !CONFIG_BLK_DEV_IO_TRACE */
# define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
# define blk_trace_shutdown(q)				do { } while (0)
# define blk_add_driver_data(rq, data, len)		do {} while (0)
# define blk_trace_setup(q, name, dev, bdev, arg)	(-ENOTTY)
# define blk_trace_startstop(q, start)			(-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...)			do { } while (0)
# define blk_add_cgroup_trace_msg(q, cg, fmt, ...)	do { } while (0)
# define blk_trace_note_message_enabled(q)		(false)

static inline int blk_trace_remove(struct request_queue *q)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_COMPAT

struct compat_blk_user_trace_setup {
	char name[BLKTRACE_BDEV_SIZE];
	u16 act_mask;
	u32 buf_size;
	u32 buf_nr;
	compat_u64 start_lba;
	compat_u64 end_lba;
	u32 pid;
};
#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup)
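
/*
 * Editorial sketch: a BLKTRACESETUP32 request from a 32-bit process is
 * widened into the native struct blk_user_trace_setup before the normal
 * setup path runs, roughly as follows (error handling and the enclosing
 * function omitted):
 *
 *	struct compat_blk_user_trace_setup cbuts;
 *	struct blk_user_trace_setup buts;
 *
 *	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
 *		return -EFAULT;
 *
 *	buts = (struct blk_user_trace_setup) {
 *		.act_mask  = cbuts.act_mask,
 *		.buf_size  = cbuts.buf_size,
 *		.buf_nr    = cbuts.buf_nr,
 *		.start_lba = cbuts.start_lba,
 *		.end_lba   = cbuts.end_lba,
 *		.pid       = cbuts.pid,
 *	};
 */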

#endif

void blk_fill_rwbs(char *rwbs, blk_opf_t opf);
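
/*
 * Editorial sketch: blk_fill_rwbs() renders an operation and its flags into
 * a short string such as "R", "WS" or "D". The block tracepoints use an
 * RWBS_LEN-sized buffer for this; the snippet assumes a request 'rq' and
 * prints via trace_printk() purely for illustration.
 *
 *	char rwbs[RWBS_LEN];
 *
 *	blk_fill_rwbs(rwbs, rq->cmd_flags);
 *	trace_printk("%s at %llu\n", rwbs,
 *		     (unsigned long long)blk_rq_pos(rq));
 */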

static inline sector_t blk_rq_trace_sector(struct request *rq)
{
	/*
	 * Tracing should ignore starting sector for passthrough requests and
	 * requests where starting sector didn't get set.
	 */
	if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
		return 0;
	return blk_rq_pos(rq);
}

static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
{
	return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq);
}
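
/*
 * Editorial sketch: the two helpers above are what the block tracepoints
 * feed into their sector/nr_sector fields, so passthrough requests report
 * 0 rather than a meaningless position. Hypothetical use in a debug hook:
 *
 *	pr_debug("io at %llu+%u\n",
 *		 (unsigned long long)blk_rq_trace_sector(rq),
 *		 blk_rq_trace_nr_sectors(rq));
 */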

#endif