bcache: convert to bioset_init()/mempool_init()
Convert bcache to embedded bio sets.

Reviewed-by: Coly Li <colyli@suse.de>
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b906bbb699
commit d19936a266
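For context, the pattern this commit applies: the old API allocated bio sets and mempools on the heap (bioset_create(), mempool_create_*()) and returned a pointer, while the new API initializes a caller-embedded object (bioset_init(), mempool_init_*()) and returns an errno. A minimal sketch of the conversion, using made-up names (struct my_dev, my_dev_init) rather than bcache's own:

#include <linux/bio.h>
#include <linux/mempool.h>

struct my_dev {				/* hypothetical example struct */
	struct bio_set	bio_split;	/* was: struct bio_set *bio_split */
	mempool_t	search;		/* was: mempool_t *search */
};

static int my_dev_init(struct my_dev *d, struct kmem_cache *cache)
{
	/*
	 * bioset_init()/mempool_init_slab_pool() fill in the embedded
	 * objects and return 0 or an errno, so failure is checked on the
	 * return value rather than with NULL tests.
	 */
	if (bioset_init(&d->bio_split, 4, 0, BIOSET_NEED_BVECS))
		return -ENOMEM;
	if (mempool_init_slab_pool(&d->search, 32, cache)) {
		bioset_exit(&d->bio_split);
		return -ENOMEM;
	}
	return 0;
}

static void my_dev_free(struct my_dev *d)
{
	/*
	 * Teardown drops the old NULL checks: bioset_exit() and
	 * mempool_exit() are safe on zeroed, never-initialized objects,
	 * which is why the diff below calls them unconditionally.
	 */
	mempool_exit(&d->search);
	bioset_exit(&d->bio_split);
}

Call sites change correspondingly, passing &d->search or &d->bio_split where they previously passed the pointer member, as the hunks below show.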
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
@@ -269,7 +269,7 @@ struct bcache_device {
 	atomic_t		*stripe_sectors_dirty;
 	unsigned long		*full_dirty_stripes;
 
-	struct bio_set		*bio_split;
+	struct bio_set		bio_split;
 
 	unsigned		data_csum:1;
 
@@ -530,9 +530,9 @@ struct cache_set {
 	struct closure		sb_write;
 	struct semaphore	sb_write_mutex;
 
-	mempool_t		*search;
-	mempool_t		*bio_meta;
-	struct bio_set		*bio_split;
+	mempool_t		search;
+	mempool_t		bio_meta;
+	struct bio_set		bio_split;
 
 	/* For the btree cache */
 	struct shrinker		shrink;
@@ -657,7 +657,7 @@ struct cache_set {
 	 * A btree node on disk could have too many bsets for an iterator to fit
 	 * on the stack - have to dynamically allocate them
 	 */
-	mempool_t		*fill_iter;
+	mempool_t		fill_iter;
 
 	struct bset_sort_state	sort;
 
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
@@ -1118,8 +1118,7 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
 
 void bch_bset_sort_state_free(struct bset_sort_state *state)
 {
-	if (state->pool)
-		mempool_destroy(state->pool);
+	mempool_exit(&state->pool);
 }
 
 int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
@@ -1129,11 +1128,7 @@ int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
 	state->page_order = page_order;
 	state->crit_factor = int_sqrt(1 << page_order);
 
-	state->pool = mempool_create_page_pool(1, page_order);
-	if (!state->pool)
-		return -ENOMEM;
-
-	return 0;
+	return mempool_init_page_pool(&state->pool, 1, page_order);
 }
 EXPORT_SYMBOL(bch_bset_sort_state_init);
 
@@ -1191,7 +1186,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
 
 		BUG_ON(order > state->page_order);
 
-		outp = mempool_alloc(state->pool, GFP_NOIO);
+		outp = mempool_alloc(&state->pool, GFP_NOIO);
 		out = page_address(outp);
 		used_mempool = true;
 		order = state->page_order;
@@ -1220,7 +1215,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
 	}
 
 	if (used_mempool)
-		mempool_free(virt_to_page(out), state->pool);
+		mempool_free(virt_to_page(out), &state->pool);
 	else
 		free_pages((unsigned long) out, order);
 
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
@@ -347,7 +347,7 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
 /* Sorting */
 
 struct bset_sort_state {
-	mempool_t		*pool;
+	mempool_t		pool;
 
 	unsigned		page_order;
 	unsigned		crit_factor;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
@@ -204,7 +204,7 @@ void bch_btree_node_read_done(struct btree *b)
 	struct bset *i = btree_bset_first(b);
 	struct btree_iter *iter;
 
-	iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
+	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
 	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
 	iter->used = 0;
 
@@ -271,7 +271,7 @@ void bch_btree_node_read_done(struct btree *b)
 	bch_bset_init_next(&b->keys, write_block(b),
 			   bset_magic(&b->c->sb));
 out:
-	mempool_free(iter, b->c->fill_iter);
+	mempool_free(iter, &b->c->fill_iter);
 	return;
 err:
 	set_btree_node_io_error(b);
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
@@ -17,12 +17,12 @@
 void bch_bbio_free(struct bio *bio, struct cache_set *c)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
-	mempool_free(b, c->bio_meta);
+	mempool_free(b, &c->bio_meta);
 }
 
 struct bio *bch_bbio_alloc(struct cache_set *c)
 {
-	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
+	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
 	struct bio *bio = &b->bio;
 
 	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
@@ -213,7 +213,7 @@ static void bch_data_insert_start(struct closure *cl)
 	do {
 		unsigned i;
 		struct bkey *k;
-		struct bio_set *split = op->c->bio_split;
+		struct bio_set *split = &op->c->bio_split;
 
 		/* 1 for the device pointer and 1 for the chksum */
 		if (bch_keylist_realloc(&op->insert_keys,
@@ -548,7 +548,7 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 
 	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
 				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
-			   GFP_NOIO, s->d->bio_split);
+			   GFP_NOIO, &s->d->bio_split);
 
 	bio_key = &container_of(n, struct bbio, bio)->key;
 	bch_bkey_copy_single_ptr(bio_key, k, ptr);
@@ -707,7 +707,7 @@ static void search_free(struct closure *cl)
 
 	bio_complete(s);
 	closure_debug_destroy(cl);
-	mempool_free(s, s->d->c->search);
+	mempool_free(s, &s->d->c->search);
 }
 
 static inline struct search *search_alloc(struct bio *bio,
@@ -715,7 +715,7 @@ static inline struct search *search_alloc(struct bio *bio,
 {
 	struct search *s;
 
-	s = mempool_alloc(d->c->search, GFP_NOIO);
+	s = mempool_alloc(&d->c->search, GFP_NOIO);
 
 	closure_init(&s->cl, NULL);
 	do_bio_hook(s, bio, request_endio);
@@ -864,7 +864,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	s->cache_missed = 1;
 
 	if (s->cache_miss || s->iop.bypass) {
-		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 		goto out_submit;
 	}
@@ -887,14 +887,14 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
 	s->iop.replace = true;
 
-	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+	miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
 
 	/* btree_search_recurse()'s btree iterator is no good anymore */
 	ret = miss == bio ? MAP_DONE : -EINTR;
 
 	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
 			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
-			dc->disk.bio_split);
+			&dc->disk.bio_split);
 	if (!cache_bio)
 		goto out_submit;
 
@@ -1008,7 +1008,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 			struct bio *flush;
 
 			flush = bio_alloc_bioset(GFP_NOIO, 0,
-						 dc->disk.bio_split);
+						 &dc->disk.bio_split);
 			if (!flush) {
 				s->iop.status = BLK_STS_RESOURCE;
 				goto insert_data;
@@ -1021,7 +1021,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 			closure_bio_submit(s->iop.c, flush, cl);
 		}
 	} else {
-		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
+		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
 		/* I/O request sent to backing device */
 		bio->bi_end_io = backing_request_endio;
 		closure_bio_submit(s->iop.c, bio, cl);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
@@ -753,8 +753,7 @@ static void bcache_device_free(struct bcache_device *d)
 		put_disk(d->disk);
 	}
 
-	if (d->bio_split)
-		bioset_free(d->bio_split);
+	bioset_exit(&d->bio_split);
 	kvfree(d->full_dirty_stripes);
 	kvfree(d->stripe_sectors_dirty);
 
@@ -796,9 +795,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 	if (idx < 0)
 		return idx;
 
-	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio),
-					   BIOSET_NEED_BVECS |
-					   BIOSET_NEED_RESCUER)) ||
+	if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
+			BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) ||
 	    !(d->disk = alloc_disk(BCACHE_MINORS))) {
 		ida_simple_remove(&bcache_device_idx, idx);
 		return -ENOMEM;
@@ -1500,14 +1498,10 @@ static void cache_set_free(struct closure *cl)
 
 	if (c->moving_gc_wq)
 		destroy_workqueue(c->moving_gc_wq);
-	if (c->bio_split)
-		bioset_free(c->bio_split);
-	if (c->fill_iter)
-		mempool_destroy(c->fill_iter);
-	if (c->bio_meta)
-		mempool_destroy(c->bio_meta);
-	if (c->search)
-		mempool_destroy(c->search);
+	bioset_exit(&c->bio_split);
+	mempool_exit(&c->fill_iter);
+	mempool_exit(&c->bio_meta);
+	mempool_exit(&c->search);
 	kfree(c->devices);
 
 	mutex_lock(&bch_register_lock);
@@ -1718,21 +1712,17 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 	INIT_LIST_HEAD(&c->btree_cache_freed);
 	INIT_LIST_HEAD(&c->data_buckets);
 
-	c->search = mempool_create_slab_pool(32, bch_search_cache);
-	if (!c->search)
-		goto err;
-
 	iter_size = (sb->bucket_size / sb->block_size + 1) *
 		sizeof(struct btree_iter_set);
 
 	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
-	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
-				sizeof(struct bbio) + sizeof(struct bio_vec) *
-				bucket_pages(c))) ||
-	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
-	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio),
-					   BIOSET_NEED_BVECS |
-					   BIOSET_NEED_RESCUER)) ||
+	    mempool_init_slab_pool(&c->search, 32, bch_search_cache) ||
+	    mempool_init_kmalloc_pool(&c->bio_meta, 2,
+				sizeof(struct bbio) + sizeof(struct bio_vec) *
+				bucket_pages(c)) ||
+	    mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
+	    bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
+			BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) ||
 	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
 	    !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
 						WQ_MEM_RECLAIM, 0)) ||