Contributors: 11

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Kent Overstreet | 1189 | 80.83% | 15 | 46.88% |
| Kuan-Wei Chiu | 158 | 10.74% | 1 | 3.12% |
| Nicholas Swenson | 70 | 4.76% | 4 | 12.50% |
| Christoph Hellwig | 20 | 1.36% | 4 | 12.50% |
| Coly Li | 14 | 0.95% | 2 | 6.25% |
| Gustavo A. R. Silva | 10 | 0.68% | 1 | 3.12% |
| Lei Ming | 5 | 0.34% | 1 | 3.12% |
| Guoqing Jiang | 2 | 0.14% | 1 | 3.12% |
| Greg Kroah-Hartman | 1 | 0.07% | 1 | 3.12% |
| Michael Christie | 1 | 0.07% | 1 | 3.12% |
| Ming Lei | 1 | 0.07% | 1 | 3.12% |
| Total | 1471 | | 32 | |
// SPDX-License-Identifier: GPL-2.0
/*
* Moving/copying garbage collector
*
* Copyright 2012 Google, Inc.
*/
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include <trace/events/bcache.h>
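
/*
 * Per-key state for one copy: the closure that ties the read and the
 * rewrite together, the keybuf entry being moved, the insert operation
 * that writes the data to its new location, and the bio used for the I/O.
 */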
struct moving_io {
struct closure cl;
struct keybuf_key *w;
struct data_insert_op op;
struct bbio bio;
};
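
/*
 * Keybuf predicate: select a key if any of its pointers lands in a
 * bucket that bch_moving_gc() flagged with GC_MOVE.
 */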
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
struct cache_set *c = container_of(buf, struct cache_set,
moving_gc_keys);
unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i) &&
GC_MOVE(PTR_BUCKET(c, k, i)))
return true;
return false;
}
/* Moving GC - IO loop */
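
/* Final closure destructor: free the moving_io once both I/Os are done */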
static CLOSURE_CALLBACK(moving_io_destructor)
{
closure_type(io, struct moving_io, cl);
kfree(io);
}
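
/*
 * Runs after the rewrite completes: release the data pages, note a
 * collision if the replace raced with a foreground write, drop the key
 * from the keybuf and release our moving_in_flight slot.
 */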
static CLOSURE_CALLBACK(write_moving_finish)
{
closure_type(io, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
bio_free_pages(bio);
if (io->op.replace_collision)
trace_bcache_gc_copy_collision(&io->w->key);
bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);
up(&io->op.c->moving_in_flight);
closure_return_with_destructor(cl, moving_io_destructor);
}
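
/*
 * Read completion: propagate I/O errors, and treat clean data whose
 * pointer went stale while we were reading as an error so it is not
 * rewritten.
 */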
static void read_moving_endio(struct bio *bio)
{
struct bbio *b = container_of(bio, struct bbio, bio);
struct moving_io *io = container_of(bio->bi_private,
struct moving_io, cl);
if (bio->bi_status)
io->op.status = bio->bi_status;
else if (!KEY_DIRTY(&b->key) &&
ptr_stale(io->op.c, &b->key, 0)) {
io->op.status = BLK_STS_IOERR;
}
bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
}
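
/* Set up the embedded bio to cover exactly the data described by the key */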
static void moving_init(struct moving_io *io)
{
struct bio *bio = &io->bio.bio;
bio_init(bio, NULL, bio->bi_inline_vecs,
DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
bio->bi_private = &io->cl;
bch_bio_map(bio, NULL);
}
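
/*
 * Runs once the read has completed: reuse the bio for the write and
 * insert the data with a replace key, so the new location only takes
 * effect if the original key is still present in the btree.
 */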
static CLOSURE_CALLBACK(write_moving)
{
closure_type(io, struct moving_io, cl);
struct data_insert_op *op = &io->op;
if (!op->status) {
moving_init(io);
io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
op->write_prio = 1;
op->bio = &io->bio.bio;
op->writeback = KEY_DIRTY(&io->w->key);
op->csum = KEY_CSUM(&io->w->key);
bkey_copy(&op->replace_key, &io->w->key);
op->replace = true;
closure_call(&op->cl, bch_data_insert, NULL, cl);
}
continue_at(cl, write_moving_finish, op->wq);
}
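
/* Submit the read for this key, then continue at write_moving */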
static CLOSURE_CALLBACK(read_moving_submit)
{
closure_type(io, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
continue_at(cl, write_moving, io->op.wq);
}
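
/*
 * Main copy loop: scan the moving_gc_keys keybuf for keys in buckets
 * marked GC_MOVE, read their data and rewrite it elsewhere, throttled
 * by the moving_in_flight semaphore.
 */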
static void read_moving(struct cache_set *c)
{
struct keybuf_key *w;
struct moving_io *io;
struct bio *bio;
struct closure cl;
closure_init_stack(&cl);
/* XXX: if we error, background writeback could stall indefinitely */
while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
&MAX_KEY, moving_pred);
if (!w)
break;
if (ptr_stale(c, &w->key, 0)) {
bch_keybuf_del(&c->moving_gc_keys, w);
continue;
}
io = kzalloc(struct_size(io, bio.bio.bi_inline_vecs,
DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
GFP_KERNEL);
if (!io)
goto err;
w->private = io;
io->w = w;
io->op.inode = KEY_INODE(&w->key);
io->op.c = c;
io->op.wq = c->moving_gc_wq;
moving_init(io);
bio = &io->bio.bio;
bio->bi_opf = REQ_OP_READ;
bio->bi_end_io = read_moving_endio;
if (bch_bio_alloc_pages(bio, GFP_KERNEL))
goto err;
trace_bcache_gc_copy(&w->key);
down(&c->moving_in_flight);
closure_call(&io->cl, read_moving_submit, NULL, &cl);
}
if (0) {
err: if (!IS_ERR_OR_NULL(w->private))
kfree(w->private);
bch_keybuf_del(&c->moving_gc_keys, w);
}
closure_sync(&cl);
}
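
/*
 * min_heap callbacks: with this ordering the heap root is the candidate
 * bucket with the most used sectors, i.e. the most expensive one to
 * evacuate, so it is the first to be dropped when the data to move
 * exceeds the reserve.
 */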
static bool new_bucket_cmp(const void *l, const void *r, void __always_unused *args)
{
struct bucket **_l = (struct bucket **)l;
struct bucket **_r = (struct bucket **)r;
return GC_SECTORS_USED(*_l) >= GC_SECTORS_USED(*_r);
}
static void new_bucket_swap(void *l, void *r, void __always_unused *args)
{
struct bucket **_l = l;
struct bucket **_r = r;
swap(*_l, *_r);
}
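
/* Sectors used by the bucket at the root of the heap (0 if empty) */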
static unsigned int bucket_heap_top(struct cache *ca)
{
struct bucket *b;
return (b = min_heap_peek(&ca->heap)[0]) ? GC_SECTORS_USED(b) : 0;
}
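
/*
 * Pick the partially used buckets that are cheapest to evacuate, cap the
 * total live data at what the RESERVE_MOVINGGC buckets can hold, mark
 * them GC_MOVE and kick off read_moving() to copy their data out.
 */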
void bch_moving_gc(struct cache_set *c)
{
struct cache *ca = c->cache;
struct bucket *b;
unsigned long sectors_to_move, reserve_sectors;
const struct min_heap_callbacks callbacks = {
.less = new_bucket_cmp,
.swp = new_bucket_swap,
};
if (!c->copy_gc_enabled)
return;
mutex_lock(&c->bucket_lock);
sectors_to_move = 0;
reserve_sectors = ca->sb.bucket_size *
fifo_used(&ca->free[RESERVE_MOVINGGC]);
ca->heap.nr = 0;
for_each_bucket(b, ca) {
if (GC_MARK(b) == GC_MARK_METADATA ||
!GC_SECTORS_USED(b) ||
GC_SECTORS_USED(b) == ca->sb.bucket_size ||
atomic_read(&b->pin))
continue;
if (!min_heap_full(&ca->heap)) {
sectors_to_move += GC_SECTORS_USED(b);
min_heap_push(&ca->heap, &b, &callbacks, NULL);
} else if (!new_bucket_cmp(&b, min_heap_peek(&ca->heap), ca)) {
sectors_to_move -= bucket_heap_top(ca);
sectors_to_move += GC_SECTORS_USED(b);
ca->heap.data[0] = b;
min_heap_sift_down(&ca->heap, 0, &callbacks, NULL);
}
}
while (sectors_to_move > reserve_sectors) {
if (ca->heap.nr) {
b = min_heap_peek(&ca->heap)[0];
min_heap_pop(&ca->heap, &callbacks, NULL);
}
sectors_to_move -= GC_SECTORS_USED(b);
}
while (ca->heap.nr) {
b = min_heap_peek(&ca->heap)[0];
min_heap_pop(&ca->heap, &callbacks, NULL);
SET_GC_MOVE(b, 1);
}
mutex_unlock(&c->bucket_lock);
c->moving_gc_keys.last_scanned = ZERO_KEY;
read_moving(c);
}
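
/* Set up the moving GC keybuf and the in-flight limit (64 concurrent moves) */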
void bch_moving_init_cache_set(struct cache_set *c)
{
bch_keybuf_init(&c->moving_gc_keys);
sema_init(&c->moving_in_flight, 64);
}