cregit-Linux how code gets into the kernel

Release 4.14 drivers/md/multipath.c

Directory: drivers/md
/*
 * multipath.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * MULTIPATH management functions.
 *
 * derived from raid1.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "md.h"
#include "multipath.h"


#define MAX_WORK_PER_DISK 128


#define	NR_RESERVED_BUFS	32

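NR_RESERVED_BUFS sizes the mempool of struct multipath_bh request trackers created in multipath_run(); for reference, the allocation it backs (excerpted from multipath_run() further down) is:

	conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
						 sizeof(struct multipath_bh));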

static int multipath_map (struct mpconf *conf)
{
	int i, disks = conf->raid_disks;

	/*
	 * Later we do read balancing on the read side
	 * now we use the first available disk.
	 */

	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return i;
		}
	}
	rcu_read_unlock();

	pr_crit_ratelimited("multipath_map(): no more operational IO paths?\n");
	return (-1);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Neil Brown          57      51.35%   13       86.67%
Linus Torvalds      51      45.95%   1        6.67%
Suzanne Wood        3       2.70%    1        6.67%
Total               111     100.00%  15       100.00%

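As a reading aid only (this sketch is not part of the driver), the contract multipath_map() establishes is that the returned index pins the chosen rdev through nr_pending, and the caller drops that reference with rdev_dec_pending() once the I/O finishes, exactly as multipath_make_request() and multipath_end_request() do below:

/* Illustrative sketch, not kernel source: pairing multipath_map() with
 * rdev_dec_pending().  Uses only helpers visible in this file and md.h. */
static void example_use_of_multipath_map(struct mpconf *conf)
{
	int path = multipath_map(conf);	/* takes a reference via nr_pending */

	if (path < 0)
		return;			/* no operational path is left */

	/* ... submit I/O to conf->multipaths[path].rdev->bdev here ... */

	rdev_dec_pending(conf->multipaths[path].rdev, conf->mddev);
}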

static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
{
	unsigned long flags;
	struct mddev *mddev = mp_bh->mddev;
	struct mpconf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&mp_bh->retry_list, &conf->retry_list);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(mddev->thread);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Linus Torvalds      45      61.64%   2        25.00%
Neil Brown          26      35.62%   5        62.50%
Al Viro             2       2.74%    1        12.50%
Total               73      100.00%  8        100.00%

/*
 * multipath_end_bh_io() is called when we have finished servicing a multipathed
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void multipath_end_bh_io(struct multipath_bh *mp_bh, blk_status_t status)
{
	struct bio *bio = mp_bh->master_bio;
	struct mpconf *conf = mp_bh->mddev->private;

	bio->bi_status = status;
	bio_endio(bio);
	mempool_free(mp_bh, conf->pool);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Linus Torvalds      25      46.30%   2        25.00%
Neil Brown          16      29.63%   3        37.50%
Christoph Hellwig   8       14.81%   2        25.00%
Al Viro             5       9.26%    1        12.50%
Total               54      100.00%  8        100.00%


static void multipath_end_request(struct bio *bio)
{
	struct multipath_bh *mp_bh = bio->bi_private;
	struct mpconf *conf = mp_bh->mddev->private;
	struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;

	if (!bio->bi_status)
		multipath_end_bh_io(mp_bh, 0);
	else if (!(bio->bi_opf & REQ_RAHEAD)) {
		/*
		 * oops, IO error:
		 */
		char b[BDEVNAME_SIZE];
		md_error (mp_bh->mddev, rdev);
		pr_info("multipath: %s: rescheduling sector %llu\n",
			bdevname(rdev->bdev,b),
			(unsigned long long)bio->bi_iter.bi_sector);
		multipath_reschedule_retry(mp_bh);
	} else
		multipath_end_bh_io(mp_bh, bio->bi_status);
	rdev_dec_pending(rdev, conf->mddev);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Neil Brown          72      51.43%   13       52.00%
Linus Torvalds      27      19.29%   2        8.00%
Jens Axboe          14      10.00%   3        12.00%
Christoph Hellwig   12      8.57%    3        12.00%
Andrew Morton       6       4.29%    1        4.00%
Al Viro             6       4.29%    1        4.00%
Kent Overstreet     2       1.43%    1        4.00%
Adrian Bunk         1       0.71%    1        4.00%
Total               140     100.00%  25       100.00%


static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
{
	struct mpconf *conf = mddev->private;
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return true;
	}

	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);

	mp_bh->master_bio = bio;
	mp_bh->mddev = mddev;

	mp_bh->path = multipath_map(conf);
	if (mp_bh->path < 0) {
		bio_io_error(bio);
		mempool_free(mp_bh, conf->pool);
		return true;
	}
	multipath = conf->multipaths + mp_bh->path;

	bio_init(&mp_bh->bio, NULL, 0);
	__bio_clone_fast(&mp_bh->bio, bio);

	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
	bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
	mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	mddev_check_writesame(mddev, &mp_bh->bio);
	mddev_check_write_zeroes(mddev, &mp_bh->bio);
	generic_make_request(&mp_bh->bio);
	return true;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Neil Brown          100     43.10%   15       48.39%
Linus Torvalds      63      27.16%   2        6.45%
Christoph Hellwig   19      8.19%    4        12.90%
Ming Lei            13      5.60%    1        3.23%
Al Viro             11      4.74%    2        6.45%
Shaohua Li          10      4.31%    1        3.23%
Jens Axboe          8       3.45%    2        6.45%
Lei Ming            4       1.72%    1        3.23%
Kent Overstreet     2       0.86%    1        3.23%
Michael Christie    1       0.43%    1        3.23%
Tejun Heo           1       0.43%    1        3.23%
Total               232     100.00%  31       100.00%


static void multipath_status(struct seq_file *seq, struct mddev *mddev)
{
	struct mpconf *conf = mddev->private;
	int i;

	seq_printf (seq, " [%d/%d] [", conf->raid_disks,
		    conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
		seq_printf (seq, "%s",
			    rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf (seq, "]");
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Linus Torvalds      64      54.70%   1        11.11%
Neil Brown          53      45.30%   8        88.89%
Total               117     100.00%  9        100.00%


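A worked example of the status format above: for a two-path array with both devices in sync, multipath_status() appends the fragment " [2/2] [UU]" to the array's line in /proc/mdstat; with one failed path it becomes " [2/1] [U_]", where "_" marks a path that is not In_sync.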
static int multipath_congested(struct mddev *mddev, int bits)
{
	struct mpconf *conf = mddev->private;
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks ; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(q->backing_dev_info, bits);
			/* Just like multipath_map, we just check the
			 * first available device */
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Neil Brown          113     98.26%   6        85.71%
Jens Axboe          2       1.74%    1        14.29%
Total               115     100.00%  7        100.00%

/*
 * Careful, this can execute in IRQ contexts as well!
 */
static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
{
	struct mpconf *conf = mddev->private;
	char b[BDEVNAME_SIZE];

	if (conf->raid_disks - mddev->degraded <= 1) {
		/*
		 * Uh oh, we can do nothing if this is our last path, but
		 * first check if this is a queued request for a device
		 * which has just failed.
		 */
		pr_warn("multipath: only one IO path left and IO error.\n");
		/* leave it active... it's all we have */
		return;
	}
	/*
	 * Mark disk as unusable
	 */
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	pr_err("multipath: IO failure on %s, disabling IO path.\n"
	       "multipath: Operation continuing on %d IO paths.\n",
	       bdevname(rdev->bdev, b),
	       conf->raid_disks - mddev->degraded);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Neil Brown          103     73.57%   14       82.35%
Linus Torvalds      34      24.29%   1        5.88%
Shaohua Li          2       1.43%    1        5.88%
Al Viro             1       0.71%    1        5.88%
Total               140     100.00%  17       100.00%

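multipath_error() is not called directly by the I/O path; it is reached through md_error(), which multipath_end_request() above invokes on failure and which in turn calls the personality's error_handler. A comment-only sketch of that chain (paraphrasing md.c, not quoting it):

/*
 * Illustrative call chain, based on md_error() in drivers/md/md.c:
 *
 *   multipath_end_request()
 *     -> md_error(mp_bh->mddev, rdev)
 *          -> mddev->pers->error_handler(mddev, rdev)   == multipath_error()
 *          -> (md core then flags recovery work and wakes the md thread)
 */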

static void print_multipath_conf (struct mpconf *conf)
{
	int i;
	struct multipath_info *tmp;

	pr_debug("MULTIPATH conf printout:\n");
	if (!conf) {
		pr_debug("(conf==NULL)\n");
		return;
	}
	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
		 conf->raid_disks);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->multipaths + i;
		if (tmp->rdev)
			pr_debug(" disk%d, o:%d, dev:%s\n",
				 i,!test_bit(Faulty, &tmp->rdev->flags),
				 bdevname(tmp->rdev->bdev,b));
	}
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Linus Torvalds      83      66.94%   1        7.69%
Neil Brown          40      32.26%   11       84.62%
Al Viro             1       0.81%    1        7.69%
Total               124     100.00%  13       100.00%


static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mpconf *conf = mddev->private;
	struct request_queue *q;
	int err = -EEXIST;
	int path;
	struct multipath_info *p;
	int first = 0;
	int last = mddev->raid_disks - 1;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	print_multipath_conf(conf);

	for (path = first; path <= last; path++)
		if ((p=conf->multipaths+path)->rdev == NULL) {
			q = rdev->bdev->bd_disk->queue;
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);

			err = md_integrity_add_rdev(rdev, mddev);
			if (err)
				break;
			spin_lock_irq(&conf->device_lock);
			mddev->degraded--;
			rdev->raid_disk = path;
			set_bit(In_sync, &rdev->flags);
			spin_unlock_irq(&conf->device_lock);
			rcu_assign_pointer(p->rdev, rdev);
			err = 0;
			break;
		}

	print_multipath_conf(conf);

	return err;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Neil Brown          129     61.43%   14       66.67%
Linus Torvalds      18      8.57%    1        4.76%
Dan J Williams      14      6.67%    1        4.76%
Al Viro             13      6.19%    1        4.76%
Jesper Juhl         12      5.71%    1        4.76%
Martin K. Petersen  11      5.24%    1        4.76%
Andrew Morton       9       4.29%    1        4.76%
Suzanne Wood        4       1.90%    1        4.76%
Total               210     100.00%  21       100.00%


static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mpconf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct multipath_info *p = conf->multipaths + number;

	print_multipath_conf(conf);

	if (rdev == p->rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			pr_warn("hot-remove-disk, slot %d is identified but is still operational!\n", number);
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		if (!test_bit(RemoveSynchronized, &rdev->flags)) {
			synchronize_rcu();
			if (atomic_read(&rdev->nr_pending)) {
				/* lost the race, try later */
				err = -EBUSY;
				p->rdev = rdev;
				goto abort;
			}
		}
		err = md_integrity_register(mddev);
	}
abort:

	print_multipath_conf(conf);
	return err;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Neil Brown          88      52.38%   11       64.71%
Al Viro             39      23.21%   2        11.76%
Linus Torvalds      30      17.86%   1        5.88%
Andre Noll          8       4.76%    1        5.88%
Martin K. Petersen  2       1.19%    1        5.88%
Paul E. McKenney    1       0.60%    1        5.88%
Total               168     100.00%  17       100.00%

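The ordering in multipath_remove_disk() deserves a note; the comment-only sketch below restates, in an illustrative way, the race the code above guards against.

/*
 * Illustrative summary of the removal race handled above:
 *
 *   p->rdev = NULL;        unpublish the path first
 *   synchronize_rcu();     wait out readers (e.g. multipath_map()) that may
 *                          have dereferenced the old pointer under RCU
 *   if nr_pending != 0     a reader took a reference in the meantime, so the
 *                          removal is rolled back and -EBUSY is returned
 */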
/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working multipaths.
 *	2.	Updates the raid superblock when problems encounter.
 *	3.	Performs writes following reads for array syncronising.
 */
static void multipathd(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct multipath_bh *mp_bh;
	struct bio *bio;
	unsigned long flags;
	struct mpconf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;

	md_check_recovery(mddev);
	for (;;) {
		char b[BDEVNAME_SIZE];
		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head))
			break;
		mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
		list_del(head->prev);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		bio = &mp_bh->bio;
		bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;

		if ((mp_bh->path = multipath_map (conf))<0) {
			pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
			       bio_devname(bio, b),
			       (unsigned long long)bio->bi_iter.bi_sector);
			multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
		} else {
			pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
			       bio_devname(bio, b),
			       (unsigned long long)bio->bi_iter.bi_sector);
			*bio = *(mp_bh->master_bio);
			bio->bi_iter.bi_sector +=
				conf->multipaths[mp_bh->path].rdev->data_offset;
			bio_set_dev(bio, conf->multipaths[mp_bh->path].rdev->bdev);
			bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
			bio->bi_end_io = multipath_end_request;
			bio->bi_private = mp_bh;
			generic_make_request(bio);
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Neil Brown          121     41.16%   11       47.83%
Linus Torvalds      92      31.29%   2        8.70%
Heiko Carstens      26      8.84%    1        4.35%
Al Viro             15      5.10%    2        8.70%
Shaohua Li          11      3.74%    1        4.35%
Andrew Morton       10      3.40%    1        4.35%
Kent Overstreet     10      3.40%    1        4.35%
Christoph Hellwig   8       2.72%    3        13.04%
Jens Axboe          1       0.34%    1        4.35%
Total               294     100.00%  23       100.00%


static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	return mddev->dev_sectors;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Dan J Williams      31      93.94%   1        50.00%
Neil Brown          2       6.06%    1        50.00%
Total               33      100.00%  2        100.00%


static int multipath_run (struct mddev *mddev)
{
	struct mpconf *conf;
	int disk_idx;
	struct multipath_info *disk;
	struct md_rdev *rdev;
	int working_disks;

	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	if (mddev->level != LEVEL_MULTIPATH) {
		pr_warn("multipath: %s: raid level not set to multipath IO (%d)\n",
			mdname(mddev), mddev->level);
		goto out;
	}
	/*
	 * copy the already verified devices into our private MULTIPATH
	 * bookkeeping area. [whatever we allocate in multipath_run(),
	 * should be freed in multipath_free()]
	 */

	conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
	mddev->private = conf;
	if (!conf)
		goto out;

	conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
				   GFP_KERNEL);
	if (!conf->multipaths)
		goto out_free_conf;

	working_disks = 0;
	rdev_for_each(rdev, mddev) {
		disk_idx = rdev->raid_disk;
		if (disk_idx < 0 ||
		    disk_idx >= mddev->raid_disks)
			continue;

		disk = conf->multipaths + disk_idx;
		disk->rdev = rdev;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);

		if (!test_bit(Faulty, &rdev->flags))
			working_disks++;
	}

	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	if (!working_disks) {
		pr_warn("multipath: no operational IO paths for %s\n",
			mdname(mddev));
		goto out_free_conf;
	}
	mddev->degraded = conf->raid_disks - working_disks;

	conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
						 sizeof(struct multipath_bh));
	if (conf->pool == NULL)
		goto out_free_conf;

	mddev->thread = md_register_thread(multipathd, mddev, "multipath");
	if (!mddev->thread)
		goto out_free_conf;

	pr_info("multipath: array %s active with %d out of %d IO paths\n",
		mdname(mddev), conf->raid_disks - mddev->degraded,
		mddev->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));

	if (md_integrity_register(mddev))
		goto out_free_conf;

	return 0;

out_free_conf:
	mempool_destroy(conf->pool);
	kfree(conf->multipaths);
	kfree(conf);
	mddev->private = NULL;
out:
	return -EIO;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Linus Torvalds      187     47.10%   2        5.71%
Neil Brown          145     36.52%   22       62.86%
Andrew Morton       16      4.03%    2        5.71%
Andre Noll          15      3.78%    2        5.71%
Martin K. Petersen  13      3.27%    2        5.71%
Dan J Williams      11      2.77%    2        5.71%
Matthew Dobson      5       1.26%    1        2.86%
Thomas Gleixner     4       1.01%    1        2.86%
Sage Weil           1       0.25%    1        2.86%
Total               397     100.00%  35       100.00%


static void multipath_free(struct mddev *mddev, void *priv)
{
	struct mpconf *conf = priv;

	mempool_destroy(conf->pool);
	kfree(conf->multipaths);
	kfree(conf);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Neil Brown          21      51.22%   5        71.43%
Linus Torvalds      19      46.34%   1        14.29%
Al Viro             1       2.44%    1        14.29%
Total               41      100.00%  7        100.00%

static struct md_personality multipath_personality =
{
	.name		= "multipath",
	.level		= LEVEL_MULTIPATH,
	.owner		= THIS_MODULE,
	.make_request	= multipath_make_request,
	.run		= multipath_run,
	.free		= multipath_free,
	.status		= multipath_status,
	.error_handler	= multipath_error,
	.hot_add_disk	= multipath_add_disk,
	.hot_remove_disk= multipath_remove_disk,
	.size		= multipath_size,
	.congested	= multipath_congested,
};

static int __init multipath_init (void)
{
	return register_md_personality (&multipath_personality);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Linus Torvalds      15      93.75%   1        50.00%
Al Viro             1       6.25%    1        50.00%
Total               16      100.00%  2        100.00%


static void __exit multipath_exit (void)
{
	unregister_md_personality (&multipath_personality);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Linus Torvalds      12      80.00%   1        33.33%
Neil Brown          2       13.33%   1        33.33%
Al Viro             1       6.67%    1        33.33%
Total               15      100.00%  3        100.00%

module_init(multipath_init);
module_exit(multipath_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("simple multi-path personality for MD");
MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
MODULE_ALIAS("md-multipath");
MODULE_ALIAS("md-level--4");

Overall Contributors

Person              Tokens  Prop     Commits  CommitProp
Neil Brown          1156    47.85%   69       59.48%
Linus Torvalds      816     33.77%   3        2.59%
Al Viro             101     4.18%    5        4.31%
Dan J Williams      61      2.52%    3        2.59%
Christoph Hellwig   48      1.99%    6        5.17%
Andrew Morton       41      1.70%    3        2.59%
Martin K. Petersen  26      1.08%    2        1.72%
Heiko Carstens      26      1.08%    1        0.86%
Jens Axboe          25      1.03%    5        4.31%
Shaohua Li          23      0.95%    3        2.59%
Andre Noll          23      0.95%    2        1.72%
Kent Overstreet     14      0.58%    1        0.86%
Ming Lei            13      0.54%    1        0.86%
Jesper Juhl         12      0.50%    1        0.86%
Suzanne Wood        7       0.29%    1        0.86%
Matthew Dobson      5       0.21%    1        0.86%
Tejun Heo           4       0.17%    2        1.72%
Thomas Gleixner     4       0.17%    1        0.86%
Lei Ming            4       0.17%    1        0.86%
Paul Gortmaker      3       0.12%    1        0.86%
Adrian Bunk         1       0.04%    1        0.86%
Michael Christie    1       0.04%    1        0.86%
Sage Weil           1       0.04%    1        0.86%
Paul E. McKenney    1       0.04%    1        0.86%
Total               2416    100.00%  116      100.00%