Commit 6f1c819c authored by Kent Overstreet, committed by Jens Axboe

dm: convert to bioset_init()/mempool_init()

Convert dm to embedded bio sets and mempools.
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent afeee514
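As a reference for readers, here is a minimal sketch (not part of the patch; the struct, function, and pool-size names below are illustrative only) of the conversion pattern applied in every file touched here: bio_set and mempool_t members become embedded fields rather than pointers, the *_create() allocators become *_init() calls that return an errno, and teardown moves from bioset_free()/mempool_destroy() to bioset_exit()/mempool_exit():

	#include <linux/bio.h>
	#include <linux/mempool.h>
	#include <linux/slab.h>

	struct my_client {                     /* hypothetical example structure */
		struct bio_set bs;             /* was: struct bio_set *bs; */
		mempool_t pool;                /* was: mempool_t *pool;    */
	};

	static int my_client_init(struct my_client *c, struct kmem_cache *cache)
	{
		int ret;

		/* was: c->bs = bioset_create(...); if (!c->bs) goto bad; */
		ret = bioset_init(&c->bs, 256, 0, BIOSET_NEED_BVECS);
		if (ret)
			return ret;

		/* was: c->pool = mempool_create_slab_pool(...); if (!c->pool) ... */
		ret = mempool_init_slab_pool(&c->pool, 16, cache);
		if (ret)
			bioset_exit(&c->bs);
		return ret;
	}

	static void my_client_exit(struct my_client *c)
	{
		/* was: mempool_destroy(c->pool); bioset_free(c->bs); */
		mempool_exit(&c->pool);
		bioset_exit(&c->bs);
	}

Embedding the sets removes one allocation and pointer dereference per client structure, and because the init functions return a real error code, callers can propagate it directly instead of hard-coding -ENOMEM, which is why several "ret = -ENOMEM;" assignments disappear in the hunks below.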
@@ -19,7 +19,7 @@
struct dm_bio_prison {
spinlock_t lock;
mempool_t *cell_pool;
mempool_t cell_pool;
struct rb_root cells;
};
@@ -34,14 +34,15 @@ static struct kmem_cache *_cell_cache;
struct dm_bio_prison *dm_bio_prison_create(void)
{
struct dm_bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
int ret;
if (!prison)
return NULL;
spin_lock_init(&prison->lock);
prison->cell_pool = mempool_create_slab_pool(MIN_CELLS, _cell_cache);
if (!prison->cell_pool) {
ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
if (ret) {
kfree(prison);
return NULL;
}
@@ -54,21 +55,21 @@ EXPORT_SYMBOL_GPL(dm_bio_prison_create);
void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
mempool_destroy(prison->cell_pool);
mempool_exit(&prison->cell_pool);
kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
return mempool_alloc(prison->cell_pool, gfp);
return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell)
{
mempool_free(cell, prison->cell_pool);
mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);
......
@@ -21,7 +21,7 @@ struct dm_bio_prison_v2 {
struct workqueue_struct *wq;
spinlock_t lock;
mempool_t *cell_pool;
mempool_t cell_pool;
struct rb_root cells;
};
@@ -36,6 +36,7 @@ static struct kmem_cache *_cell_cache;
struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
{
struct dm_bio_prison_v2 *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
int ret;
if (!prison)
return NULL;
@@ -43,8 +44,8 @@ struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
prison->wq = wq;
spin_lock_init(&prison->lock);
prison->cell_pool = mempool_create_slab_pool(MIN_CELLS, _cell_cache);
if (!prison->cell_pool) {
ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
if (ret) {
kfree(prison);
return NULL;
}
@@ -57,21 +58,21 @@ EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2);
void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2 *prison)
{
mempool_destroy(prison->cell_pool);
mempool_exit(&prison->cell_pool);
kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2);
struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison, gfp_t gfp)
{
return mempool_alloc(prison->cell_pool, gfp);
return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2);
void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell)
{
mempool_free(cell, prison->cell_pool);
mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2);
......
@@ -447,9 +447,9 @@ struct cache {
struct work_struct migration_worker;
struct delayed_work waker;
struct dm_bio_prison_v2 *prison;
struct bio_set *bs;
struct bio_set bs;
mempool_t *migration_pool;
mempool_t migration_pool;
struct dm_cache_policy *policy;
unsigned policy_nr_args;
@@ -550,7 +550,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
struct dm_cache_migration *mg;
mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
if (!mg)
return NULL;
@@ -569,7 +569,7 @@ static void free_migration(struct dm_cache_migration *mg)
if (atomic_dec_and_test(&cache->nr_allocated_migrations))
wake_up(&cache->migration_wait);
mempool_free(mg, cache->migration_pool);
mempool_free(mg, &cache->migration_pool);
}
/*----------------------------------------------------------------*/
@@ -924,7 +924,7 @@ static void issue_op(struct bio *bio, void *context)
static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{
struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, cache->bs);
struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs);
BUG_ON(!origin_bio);
@@ -2011,7 +2011,7 @@ static void destroy(struct cache *cache)
{
unsigned i;
mempool_destroy(cache->migration_pool);
mempool_exit(&cache->migration_pool);
if (cache->prison)
dm_bio_prison_destroy_v2(cache->prison);
@@ -2047,8 +2047,7 @@ static void destroy(struct cache *cache)
kfree(cache->ctr_args[i]);
kfree(cache->ctr_args);
if (cache->bs)
bioset_free(cache->bs);
bioset_exit(&cache->bs);
kfree(cache);
}
@@ -2498,8 +2497,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
cache->features = ca->features;
if (writethrough_mode(cache)) {
/* Create bioset for writethrough bios issued to origin */
cache->bs = bioset_create(BIO_POOL_SIZE, 0, 0);
if (!cache->bs)
r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0);
if (r)
goto bad;
}
@@ -2630,9 +2629,9 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
migration_cache);
if (!cache->migration_pool) {
r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE,
migration_cache);
if (r) {
*error = "Error creating cache's migration mempool";
goto bad;
}
......
@@ -91,8 +91,8 @@ struct mapped_device {
/*
* io objects are allocated from here.
*/
struct bio_set *io_bs;
struct bio_set *bs;
struct bio_set io_bs;
struct bio_set bs;
/*
* freeze/thaw support require holding onto a super block
......
@@ -143,14 +143,14 @@ struct crypt_config {
* pool for per bio private data, crypto requests,
* encryption requests/buffer pages and integrity tags
*/
mempool_t *req_pool;
mempool_t *page_pool;
mempool_t *tag_pool;
mempool_t req_pool;
mempool_t page_pool;
mempool_t tag_pool;
unsigned tag_pool_max_sectors;
struct percpu_counter n_allocated_pages;
struct bio_set *bs;
struct bio_set bs;
struct mutex bio_alloc_lock;
struct workqueue_struct *io_queue;
@@ -1245,7 +1245,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
if (!ctx->r.req)
ctx->r.req = mempool_alloc(cc->req_pool, GFP_NOIO);
ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
@@ -1262,7 +1262,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
struct convert_context *ctx)
{
if (!ctx->r.req_aead)
ctx->r.req_aead = mempool_alloc(cc->req_pool, GFP_NOIO);
ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
@@ -1290,7 +1290,7 @@ static void crypt_free_req_skcipher(struct crypt_config *cc,
struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
if ((struct skcipher_request *)(io + 1) != req)
mempool_free(req, cc->req_pool);
mempool_free(req, &cc->req_pool);
}
static void crypt_free_req_aead(struct crypt_config *cc,
@@ -1299,7 +1299,7 @@ static void crypt_free_req_aead(struct crypt_config *cc,
struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
if ((struct aead_request *)(io + 1) != req)
mempool_free(req, cc->req_pool);
mempool_free(req, &cc->req_pool);
}
static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
@@ -1409,7 +1409,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
mutex_lock(&cc->bio_alloc_lock);
clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
if (!clone)
goto out;
@@ -1418,7 +1418,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
remaining_size = size;
for (i = 0; i < nr_iovecs; i++) {
page = mempool_alloc(cc->page_pool, gfp_mask);
page = mempool_alloc(&cc->page_pool, gfp_mask);
if (!page) {
crypt_free_buffer_pages(cc, clone);
bio_put(clone);
@@ -1453,7 +1453,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
bio_for_each_segment_all(bv, clone, i) {
BUG_ON(!bv->bv_page);
mempool_free(bv->bv_page, cc->page_pool);
mempool_free(bv->bv_page, &cc->page_pool);
}
}
@@ -1492,7 +1492,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
crypt_free_req(cc, io->ctx.r.req, base_bio);
if (unlikely(io->integrity_metadata_from_pool))
mempool_free(io->integrity_metadata, io->cc->tag_pool);
mempool_free(io->integrity_metadata, &io->cc->tag_pool);
else
kfree(io->integrity_metadata);
@@ -1565,7 +1565,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
* biovecs we don't need to worry about the block layer
* modifying the biovec array; so leverage bio_clone_fast().
*/
clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
if (!clone)
return 1;
@@ -2219,17 +2219,16 @@ static void crypt_dtr(struct dm_target *ti)
crypt_free_tfms(cc);
if (cc->bs)
bioset_free(cc->bs);
bioset_exit(&cc->bs);
mempool_destroy(cc->page_pool);
mempool_destroy(cc->req_pool);
mempool_destroy(cc->tag_pool);
if (cc->page_pool)
if (mempool_initialized(&cc->page_pool))
WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
percpu_counter_destroy(&cc->n_allocated_pages);
mempool_exit(&cc->page_pool);
mempool_exit(&cc->req_pool);
mempool_exit(&cc->tag_pool);
if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
cc->iv_gen_ops->dtr(cc);
@@ -2743,8 +2742,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
iv_size_padding = align_mask;
}
ret = -ENOMEM;
/* ...| IV + padding | original IV | original sec. number | bio tag offset | */
additional_req_size = sizeof(struct dm_crypt_request) +
iv_size_padding + cc->iv_size +
@@ -2752,8 +2749,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
sizeof(uint64_t) +
sizeof(unsigned int);
cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + additional_req_size);
if (!cc->req_pool) {
ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
if (ret) {
ti->error = "Cannot allocate crypt request mempool";
goto bad;
}
@@ -2762,14 +2759,14 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
ARCH_KMALLOC_MINALIGN);
cc->page_pool = mempool_create(BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
if (!cc->page_pool) {
ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
if (ret) {
ti->error = "Cannot allocate page mempool";
goto bad;
}
cc->bs = bioset_create(MIN_IOS, 0, BIOSET_NEED_BVECS);
if (!cc->bs) {
ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
if (ret) {
ti->error = "Cannot allocate crypt bioset";
goto bad;
}
@@ -2806,11 +2803,10 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (!cc->tag_pool_max_sectors)
cc->tag_pool_max_sectors = 1;
cc->tag_pool = mempool_create_kmalloc_pool(MIN_IOS,
ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
cc->tag_pool_max_sectors * cc->on_disk_tag_size);
if (!cc->tag_pool) {
if (ret) {
ti->error = "Cannot allocate integrity tags mempool";
ret = -ENOMEM;
goto bad;
}
@@ -2903,7 +2899,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
if (bio_sectors(bio) > cc->tag_pool_max_sectors)
dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
io->integrity_metadata_from_pool = true;
}
}
......
@@ -142,7 +142,7 @@ struct dm_integrity_c {
unsigned tag_size;
__s8 log2_tag_size;
sector_t start;
mempool_t *journal_io_mempool;
mempool_t journal_io_mempool;
struct dm_io_client *io;
struct dm_bufio_client *bufio;
struct workqueue_struct *metadata_wq;
@@ -1817,7 +1817,7 @@ static void complete_copy_from_journal(unsigned long error, void *context)
struct journal_completion *comp = io->comp;
struct dm_integrity_c *ic = comp->ic;
remove_range(ic, &io->range);
mempool_free(io, ic->journal_io_mempool);
mempool_free(io, &ic->journal_io_mempool);
if (unlikely(error != 0))
dm_integrity_io_error(ic, "copying from journal", -EIO);
complete_journal_op(comp);
@@ -1886,7 +1886,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
}
next_loop = k - 1;
io = mempool_alloc(ic->journal_io_mempool, GFP_NOIO);
io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
io->comp = &comp;
io->range.logical_sector = sec;
io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
@@ -1918,7 +1918,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
if (j == k) {
remove_range_unlocked(ic, &io->range);
spin_unlock_irq(&ic->endio_wait.lock);
mempool_free(io, ic->journal_io_mempool);
mempool_free(io, &ic->journal_io_mempool);
goto skip_io;
}
for (l = j; l < k; l++) {
@@ -2980,9 +2980,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
ic->journal_io_mempool = mempool_create_slab_pool(JOURNAL_IO_MEMPOOL, journal_io_cache);
if (!ic->journal_io_mempool) {
r = -ENOMEM;
r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
if (r) {
ti->error = "Cannot allocate mempool";
goto bad;
}
@@ -3196,7 +3195,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
destroy_workqueue(ic->writer_wq);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
mempool_destroy(ic->journal_io_mempool);
mempool_exit(&ic->journal_io_mempool);
if (ic->io)
dm_io_client_destroy(ic->io);
if (ic->dev)
......
@@ -22,8 +22,8 @@
#define DM_IO_MAX_REGIONS BITS_PER_LONG
struct dm_io_client {
mempool_t *pool;
struct bio_set *bios;
mempool_t pool;
struct bio_set bios;
};
/*
@@ -49,32 +49,33 @@ struct dm_io_client *dm_io_client_create(void)
{
struct dm_io_client *client;
unsigned min_ios = dm_get_reserved_bio_based_ios();
int ret;
client = kmalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);
client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
if (!client->pool)
ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
if (ret)
goto bad;
client->bios = bioset_create(min_ios, 0, BIOSET_NEED_BVECS);
if (!client->bios)
ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
if (ret)
goto bad;
return client;
bad:
mempool_destroy(client->pool);
mempool_exit(&client->pool);
kfree(client);
return ERR_PTR(-ENOMEM);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(dm_io_client_create);
void dm_io_client_destroy(struct dm_io_client *client)
{
mempool_destroy(client->pool);
bioset_free(client->bios);
mempool_exit(&client->pool);
bioset_exit(&client->bios);
kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
@@ -120,7 +121,7 @@ static void complete_io(struct io *io)
invalidate_kernel_vmap_range(io->vma_invalidate_address,
io->vma_invalidate_size);
mempool_free(io, io->client->pool);
mempool_free(io, &io->client->pool);
fn(error_bits, context);
}
@@ -344,7 +345,7 @@ static void do_region(int op, int op_flags, unsigned region,
dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
}
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio_set_dev(bio, where->bdev);
bio->bi_end_io = endio;
@@ -442,7 +443,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
init_completion(&sio.wait);
io = mempool_alloc(client->pool, GFP_NOIO);
io = mempool_alloc(&client->pool, GFP_NOIO);
io->error_bits = 0;
atomic_set(&io->count, 1); /* see dispatch_io() */
io->client = client;
@@ -474,7 +475,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
return -EIO;
}
io = mempool_alloc(client->pool, GFP_NOIO);
io = mempool_alloc(&client->pool, GFP_NOIO);
io->error_bits = 0;
atomic_set(&io->count, 1); /* see dispatch_io() */
io->client = client;
......
@@ -47,7 +47,7 @@ struct dm_kcopyd_client {
wait_queue_head_t destroyq;
atomic_t nr_jobs;
mempool_t *job_pool;
mempool_t job_pool;
struct workqueue_struct *kcopyd_wq;
struct work_struct kcopyd_work;
@@ -479,7 +479,7 @@ static int run_complete_job(struct kcopyd_job *job)
*/
if (job->master_job == job) {
mutex_destroy(&job->lock);
mempool_free(job, kc->job_pool);
mempool_free(job, &kc->job_pool);
}
fn(read_err, write_err, context);
@@ -751,7 +751,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
* Allocate an array of jobs consisting of one master job
* followed by SPLIT_COUNT sub jobs.
*/
job = mempool_alloc(kc->job_pool, GFP_NOIO);
job = mempool_alloc(&kc->job_pool, GFP_NOIO);
mutex_init(&job->lock);
/*
@@ -835,7 +835,7 @@ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
{
struct kcopyd_job *job;
job = mempool_alloc(kc->job_pool, GFP_NOIO);
job = mempool_alloc(&kc->job_pool, GFP_NOIO);
memset(job, 0, sizeof(struct kcopyd_job));
job->kc = kc;
@@ -879,7 +879,7 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
*---------------------------------------------------------------*/
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
int r = -ENOMEM;
int r;
struct dm_kcopyd_client *kc;
kc = kmalloc(sizeof(*kc), GFP_KERNEL);
@@ -892,14 +892,16 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
INIT_LIST_HEAD(&kc->pages_jobs);
kc->throttle = throttle;
kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
if (!kc->job_pool)
r = mempool_init_slab_pool(&kc->job_pool, MIN_JOBS, _job_cache);
if (r)
goto bad_slab;
INIT_WORK(&kc->kcopyd_work, do_work);
kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0);
if (!kc->kcopyd_wq)
if (!kc->kcopyd_wq) {
r = -ENOMEM;
goto bad_workqueue;
}
kc->pages = NULL;
kc->nr_reserved_pages = kc->nr_free_pages = 0;
@@ -923,7 +925,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
bad_client_pages:
destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
mempool_destroy(kc->job_pool);
mempool_exit(&kc->job_pool);
bad_slab:
kfree(kc);
@@ -942,7 +944,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
destroy_workqueue(kc->kcopyd_wq);
dm_io_client_destroy(kc->io_client);
client_free_pages(kc);
mempool_destroy(kc->job_pool);
mempool_exit(&kc->job_pool);
kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);
@@ -76,7 +76,7 @@ struct log_c {
*/
uint32_t integrated_flush;
mempool_t *flush_entry_pool;
mempool_t flush_entry_pool;
};
static struct kmem_cache *_flush_entry_cache;
@@ -249,11 +249,10 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
goto out;
}
lc->flush_entry_pool = mempool_create_slab_pool(FLUSH_ENTRY_POOL_SIZE,
_flush_entry_cache);
if (!lc->flush_entry_pool) {
r = mempool_init_slab_pool(&lc->flush_entry_pool, FLUSH_ENTRY_POOL_SIZE,
_flush_entry_cache);
if (r) {
DMERR("Failed to create flush_entry_pool");
r = -ENOMEM;
goto out;
}
@@ -313,7 +312,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
out:
kfree(devices_rdata);
if (r) {
mempool_destroy(lc->flush_entry_pool);
mempool_exit(&lc->flush_entry_pool);
kfree(lc);
kfree(ctr_str);
} else {
@@ -342,7 +341,7 @@ static void userspace_dtr(struct dm_dirty_log *log)
if (lc->log_dev)
dm_put_device(lc->ti, lc->log_dev);
mempool_destroy(lc->flush_entry_pool);
mempool_exit(&lc->flush_entry_pool);
kfree(lc->usr_argv_str);
kfree(lc);
@@ -570,7 +569,7 @@ static int userspace_flush(struct dm_dirty_log *log)
int mark_list_is_empty;
int clear_list_is_empty;
struct dm_dirty_log_flush_entry *fe, *tmp_fe;
mempool_t *flush_entry_pool = lc->flush_entry_pool;
mempool_t *flush_entry_pool = &lc->flush_entry_pool;
spin_lock_irqsave(&lc->flush_lock, flags);
list_splice_init(&lc->mark_list, &mark_list);
@@ -653,7 +652,7 @@ static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
struct dm_dirty_log_flush_entry *fe;
/* Wait for an allocation, but _never_ fail */
fe = mempool_alloc(lc->flush_entry_pool, GFP_NOIO);
fe = mempool_alloc(&lc->flush_entry_pool, GFP_NOIO);
BUG_ON(!fe);
spin_lock_irqsave(&lc->flush_lock, flags);
@@ -687,7 +686,7 @@ static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
* to cause the region to be resync'ed when the
* device is activated next time.
*/
fe = mempool_alloc(lc->flush_entry_pool, GFP_ATOMIC);
fe = mempool_alloc(&lc->flush_entry_pool, GFP_ATOMIC);
if (!fe) {
DMERR("Failed to allocate memory to clear region.");
return;
......
@@ -63,7 +63,7 @@ struct dm_region_hash {
/* hash table */
rwlock_t hash_lock;
mempool_t *region_pool;
mempool_t region_pool;
unsigned mask;
unsigned nr_buckets;
unsigned prime;
@@ -169,6 +169,7 @@ struct dm_region_hash *dm_region_hash_create(
struct dm_region_hash *rh;
unsigned nr_buckets, max_buckets;
size_t i;
int ret;
/*
* Calculate a suitable number of buckets for our hash
@@ -220,9 +221,9 @@ struct dm_region_hash *dm_region_hash_create(
INIT_LIST_HEAD(&rh->failed_recovered_regions);
rh->flush_failure = 0;
rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
sizeof(struct dm_region));
if (!rh->region_pool) {
ret = mempool_init_kmalloc_pool(&rh->region_pool, MIN_REGIONS,
sizeof(struct dm_region));
if (ret) {
vfree(rh->buckets);
kfree(rh);
rh = ERR_PTR(-ENOMEM);
@@ -242,14 +243,14 @@ void dm_region_hash_destroy(struct dm_region_hash *rh)
list_for_each_entry_safe(reg, nreg, rh->buckets + h,
hash_list) {
BUG_ON(atomic_read(&reg->pending));
mempool_free(reg, rh->region_pool);
mempool_free(reg, &rh->region_pool);
}
}
if (rh->log)
dm_dirty_log_destroy(rh->log);
mempool_destroy(rh->region_pool);
mempool_exit(&rh->region_pool);
vfree(rh->buckets);
kfree(rh);
}
@@ -287,7 +288,7 @@ static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
struct dm_region *reg, *nreg;
nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
nreg = mempool_alloc(&rh->region_pool, GFP_ATOMIC);
if (unlikely(!nreg))
nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);
@@ -303,7 +304,7 @@ static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
reg = __rh_lookup(rh, region);
if (reg)
/* We lost the race. */
mempool_free(nreg, rh->region_pool);
mempool_free(nreg, &rh->region_pool);
else {
__rh_insert(rh, nreg);
if (nreg->state == DM_RH_CLEAN) {
@@ -481,17 +482,17 @@ void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
list_for_each_entry_safe(reg, next, &recovered, list) {
rh->log->type->clear_region(rh->log, reg->key);
complete_resync_work(reg, 1);
mempool_free(reg, rh->region_pool);
mempool_free(reg, &rh->region_pool);