Contributors: 19

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Alan Cox | 406 | 30.66% | 1 | 2.33% |
| Dan J Williams | 312 | 23.56% | 4 | 9.30% |
| Vivek Goyal | 126 | 9.52% | 2 | 4.65% |
| Christoph Hellwig | 106 | 8.01% | 5 | 11.63% |
| Milan Broz | 102 | 7.70% | 3 | 6.98% |
| Mike Snitzer | 72 | 5.44% | 5 | 11.63% |
| Toshi Kani | 66 | 4.98% | 1 | 2.33% |
| Andrew Morton | 29 | 2.19% | 3 | 6.98% |
| Mikulas Patocka | 28 | 2.11% | 5 | 11.63% |
| Paolo Bonzini | 27 | 2.04% | 1 | 2.33% |
| Alasdair G. Kergon | 17 | 1.28% | 4 | 9.30% |
| Damien Le Moal | 9 | 0.68% | 2 | 4.65% |
| Denis Semakin | 6 | 0.45% | 1 | 2.33% |
| Lars Marowsky-Bree | 5 | 0.38% | 1 | 2.33% |
| Kent Overstreet | 4 | 0.30% | 1 | 2.33% |
| Tomohiro Kusumi | 3 | 0.23% | 1 | 2.33% |
| Ajay Joshi | 3 | 0.23% | 1 | 2.33% |
| Joe Thornber | 2 | 0.15% | 1 | 2.33% |
| Kiyoshi Ueda | 1 | 0.08% | 1 | 2.33% |
| Total | 1324 | | 43 | |
 
/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "linear"
/*
 * Linear: maps a linear range of a device.
 */
struct linear_c {
	struct dm_dev *dev;
	sector_t start;
};
/*
 * Construct a linear mapping: <dev_path> <offset>
 */
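/*
 * Example table line (the device path and sector counts are illustrative):
 *   0 409600 linear /dev/sdb 2048
 * maps 409600 sectors of the dm device, starting at its sector 0, onto
 * /dev/sdb starting at sector 2048; this constructor receives only the
 * trailing "<dev_path> <offset>" pair.
 */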
static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct linear_c *lc;
	unsigned long long tmp;
	char dummy;
	int ret;
	if (argc != 2) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}
	lc = kmalloc(sizeof(*lc), GFP_KERNEL);
	if (lc == NULL) {
		ti->error = "Cannot allocate linear context";
		return -ENOMEM;
	}
	ret = -EINVAL;
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	lc->start = tmp;
	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_secure_erase_bios = 1;
	ti->num_write_same_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->private = lc;
	return 0;
      bad:
	kfree(lc);
	return ret;
}
static void linear_dtr(struct dm_target *ti)
{
	struct linear_c *lc = (struct linear_c *) ti->private;
	dm_put_device(ti, lc->dev);
	kfree(lc);
}
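/*
 * Remap a sector relative to this target into a sector on the underlying
 * device. Illustrative arithmetic: with ti->begin == 0 and lc->start == 2048,
 * dm sector 100 maps to sector 2148 of lc->dev (dm_target_offset() subtracts
 * ti->begin before lc->start is added).
 */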
static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct linear_c *lc = ti->private;
	return lc->start + dm_target_offset(ti, bi_sector);
}
static void linear_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct linear_c *lc = ti->private;
	bio_set_dev(bio, lc->dev->bdev);
	if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
		bio->bi_iter.bi_sector =
			linear_map_sector(ti, bio->bi_iter.bi_sector);
}
static int linear_map(struct dm_target *ti, struct bio *bio)
{
	linear_map_bio(ti, bio);
	return DM_MAPIO_REMAPPED;
}
static void linear_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct linear_c *lc = (struct linear_c *) ti->private;
	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;
	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s %llu", lc->dev->name,
				(unsigned long long)lc->start);
		break;
	}
}
static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct linear_c *lc = (struct linear_c *) ti->private;
	struct dm_dev *dev = lc->dev;
	*bdev = dev->bdev;
	/*
	 * Only pass ioctls through if the device sizes match exactly;
	 * otherwise return nonzero so DM core knows this target maps only
	 * part of the underlying device and treats the forwarded ioctl
	 * accordingly.
	 */
	if (lc->start ||
	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return 0;
}
#ifdef CONFIG_BLK_DEV_ZONED
static int linear_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct linear_c *lc = ti->private;
	sector_t sector = linear_map_sector(ti, args->next_sector);
	args->start = lc->start;
	return blkdev_report_zones(lc->dev->bdev, sector, nr_zones,
				   dm_report_zones_cb, args);
}
#endif
static int linear_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct linear_c *lc = ti->private;
	return fn(ti, lc->dev, lc->start, ti->len, data);
}
#if IS_ENABLED(CONFIG_DAX_DRIVER)
static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	long ret;
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
	dev_sector = linear_map_sector(ti, sector);
	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
	if (ret)
		return ret;
	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
	dev_sector = linear_map_sector(ti, sector);
	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
	dev_sector = linear_map_sector(ti, sector);
	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
				      size_t nr_pages)
{
	int ret;
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
	dev_sector = linear_map_sector(ti, sector);
	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
	if (ret)
		return ret;
	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}
#else
#define linear_dax_direct_access NULL
#define linear_dax_copy_from_iter NULL
#define linear_dax_copy_to_iter NULL
#define linear_dax_zero_page_range NULL
#endif
static struct target_type linear_target = {
	.name   = "linear",
	.version = {1, 4, 0},
#ifdef CONFIG_BLK_DEV_ZONED
	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
	.report_zones = linear_report_zones,
#else
	.features = DM_TARGET_PASSES_INTEGRITY,
#endif
	.module = THIS_MODULE,
	.ctr    = linear_ctr,
	.dtr    = linear_dtr,
	.map    = linear_map,
	.status = linear_status,
	.prepare_ioctl = linear_prepare_ioctl,
	.iterate_devices = linear_iterate_devices,
	.direct_access = linear_dax_direct_access,
	.dax_copy_from_iter = linear_dax_copy_from_iter,
	.dax_copy_to_iter = linear_dax_copy_to_iter,
	.dax_zero_page_range = linear_dax_zero_page_range,
};
int __init dm_linear_init(void)
{
	int r = dm_register_target(&linear_target);
	if (r < 0)
		DMERR("register failed %d", r);
	return r;
}
void dm_linear_exit(void)
{
	dm_unregister_target(&linear_target);
}
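For reference, the target registered above is normally driven from userspace through the device-mapper ioctl interface, typically via dmsetup or libdevmapper. The following is a minimal sketch, not part of the kernel source above, showing how a single-segment linear mapping could be created with libdevmapper; the device name "lin0", the path /dev/sdb, and the sector counts are assumptions for illustration.

/*
 * Userspace sketch (assumes libdevmapper; build with -ldevmapper).
 * Creates /dev/mapper/lin0 mapping 409600 sectors onto /dev/sdb at
 * sector offset 2048, i.e. the table line "0 409600 linear /dev/sdb 2048".
 */
#include <stdio.h>
#include <libdevmapper.h>

int main(void)
{
	struct dm_task *dmt;
	int r = 1;

	/* Build a DM_DEVICE_CREATE request. */
	dmt = dm_task_create(DM_DEVICE_CREATE);
	if (!dmt)
		return 1;

	if (!dm_task_set_name(dmt, "lin0") ||
	    !dm_task_add_target(dmt, 0, 409600, "linear", "/dev/sdb 2048") ||
	    !dm_task_run(dmt)) {
		fprintf(stderr, "failed to create linear mapping\n");
		goto out;
	}
	r = 0;
out:
	dm_task_destroy(dmt);
	return r;
}

The same mapping can be created from the shell with dmsetup, e.g. echo "0 409600 linear /dev/sdb 2048" | dmsetup create lin0 (paths and sizes again illustrative).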