Commit ec667158 authored by Linus Torvalds

Merge tag 'dm-3.10-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm

Pull device-mapper updates from Alasdair Kergon:
 "Allow devices that hold metadata for the device-mapper thin
  provisioning target to be extended easily; allow WRITE SAME on
  multipath devices; an assortment of little fixes and clean-ups."

* tag 'dm-3.10-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm: (21 commits)
  dm cache: set config value
  dm cache: move config fns
  dm thin: generate event when metadata threshold passed
  dm persistent metadata: add space map threshold callback
  dm persistent data: add threshold callback to space map
  dm thin: detect metadata device resizing
  dm persistent data: support space map resizing
  dm thin: open dev read only when possible
  dm thin: refactor data dev resize
  dm cache: replace memcpy with struct assignment
  dm cache: fix typos in comments
  dm cache policy: fix description of lookup fn
  dm: document iterate_devices
  dm persistent data: fix error message typos
  dm cache: tune migration throttling
  dm mpath: enable WRITE SAME support
  dm table: fix write same support
  dm bufio: avoid a possible __vmalloc deadlock
  dm snapshot: fix error return code in snapshot_ctr
  dm cache: fix error return code in cache_create
  ...
parents f755407d 2f14f4b5
......@@ -319,6 +319,9 @@ static void __cache_size_refresh(void)
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
enum data_mode *data_mode)
{
unsigned noio_flag;
void *ptr;
if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
*data_mode = DATA_MODE_SLAB;
return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
......@@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
}
*data_mode = DATA_MODE_VMALLOC;
return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
/*
* __vmalloc allocates the data pages and auxiliary structures with
* gfp_flags that were specified, but pagetables are always allocated
* with GFP_KERNEL, no matter what was specified as gfp_mask.
*
* Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
* all allocations done by this process (including pagetables) are done
* as if GFP_NOIO was specified.
*/
if (gfp_mask & __GFP_NORETRY)
noio_flag = memalloc_noio_save();
ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
if (gfp_mask & __GFP_NORETRY)
memalloc_noio_restore(noio_flag);
return ptr;
}
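For reference, a minimal sketch (not part of this patch; my_alloc_under_noio is an illustrative name) of the scoped PF_MEMALLOC_NOIO pattern the hunk above relies on:

#include <linux/sched.h>
#include <linux/vmalloc.h>

static void *my_alloc_under_noio(unsigned long size)
{
	unsigned noio_flag;
	void *ptr;

	/*
	 * All allocations made by this task in the bracketed region,
	 * including the page tables that __vmalloc builds with GFP_KERNEL,
	 * behave as if GFP_NOIO had been passed.
	 */
	noio_flag = memalloc_noio_save();
	ptr = __vmalloc(size, GFP_NOIO, PAGE_KERNEL);
	memalloc_noio_restore(noio_flag);

	return ptr;
}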
/*
......
......@@ -1044,7 +1044,7 @@ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats)
{
down_read(&cmd->root_lock);
memcpy(stats, &cmd->stats, sizeof(*stats));
*stats = cmd->stats;
up_read(&cmd->root_lock);
}
......@@ -1052,7 +1052,7 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats)
{
down_write(&cmd->root_lock);
memcpy(&cmd->stats, stats, sizeof(*stats));
cmd->stats = *stats;
up_write(&cmd->root_lock);
}
......
......@@ -130,8 +130,8 @@ struct dm_cache_policy {
*
* Must not block.
*
* Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK
* would be typical).
* Returns 0 if in cache, -ENOENT if not, < 0 for other errors
* (-EWOULDBLOCK would be typical).
*/
int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
......
......@@ -205,7 +205,7 @@ struct per_bio_data {
/*
* writethrough fields. These MUST remain at the end of this
* structure and the 'cache' member must be the first as it
* is used to determine the offsetof the writethrough fields.
* is used to determine the offset of the writethrough fields.
*/
struct cache *cache;
dm_cblock_t cblock;
......@@ -393,7 +393,7 @@ static int get_cell(struct cache *cache,
return r;
}
/*----------------------------------------------------------------*/
/*----------------------------------------------------------------*/
static bool is_dirty(struct cache *cache, dm_cblock_t b)
{
......@@ -419,6 +419,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
}
/*----------------------------------------------------------------*/
static bool block_size_is_power_of_two(struct cache *cache)
{
return cache->sectors_per_block_shift >= 0;
......@@ -667,7 +668,7 @@ static void writethrough_endio(struct bio *bio, int err)
/*
* We can't issue this bio directly, since we're in interrupt
* context. So it get's put on a bio list for processing by the
* context. So it gets put on a bio list for processing by the
* worker thread.
*/
defer_writethrough_bio(pb->cache, bio);
......@@ -1445,6 +1446,7 @@ static void do_worker(struct work_struct *ws)
static void do_waker(struct work_struct *ws)
{
struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
policy_tick(cache->policy);
wake_worker(cache);
queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}
......@@ -1809,7 +1811,37 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
static struct kmem_cache *migration_cache;
static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
#define NOT_CORE_OPTION 1
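/*
 * Handle <key> <value> pairs owned by the cache core itself; returns
 * NOT_CORE_OPTION for keys that must be passed on to the policy.
 */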
static int process_config_option(struct cache *cache, const char *key, const char *value)
{
unsigned long tmp;
if (!strcasecmp(key, "migration_threshold")) {
if (kstrtoul(value, 10, &tmp))
return -EINVAL;
cache->migration_threshold = tmp;
return 0;
}
return NOT_CORE_OPTION;
}
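/*
 * Try the core options first; any key the core does not recognise is
 * handed to the policy's own config handler.
 */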
static int set_config_value(struct cache *cache, const char *key, const char *value)
{
int r = process_config_option(cache, key, value);
if (r == NOT_CORE_OPTION)
r = policy_set_config_value(cache->policy, key, value);
if (r)
DMWARN("bad config value for %s: %s", key, value);
return r;
}
static int set_config_values(struct cache *cache, int argc, const char **argv)
{
int r = 0;
......@@ -1819,12 +1851,9 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
}
while (argc) {
r = policy_set_config_value(p, argv[0], argv[1]);
if (r) {
DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
argv[0], argv[1]);
return r;
}
r = set_config_value(cache, argv[0], argv[1]);
if (r)
break;
argc -= 2;
argv += 2;
......@@ -1836,8 +1865,6 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
static int create_cache_policy(struct cache *cache, struct cache_args *ca,
char **error)
{
int r;
cache->policy = dm_cache_policy_create(ca->policy_name,
cache->cache_size,
cache->origin_sectors,
......@@ -1847,14 +1874,7 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
return -ENOMEM;
}
r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
if (r) {
*error = "Error setting cache policy's config values";
dm_cache_policy_destroy(cache->policy);
cache->policy = NULL;
}
return r;
return 0;
}
/*
......@@ -1886,7 +1906,7 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,
return discard_block_size;
}
#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
#define DEFAULT_MIGRATION_THRESHOLD 2048
static int cache_create(struct cache_args *ca, struct cache **result)
{
......@@ -1911,7 +1931,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->discards_supported = true;
ti->discard_zeroes_data_unsupported = true;
memcpy(&cache->features, &ca->features, sizeof(cache->features));
cache->features = ca->features;
ti->per_bio_data_size = get_per_bio_data_size(cache);
cache->callbacks.congested_fn = cache_is_congested;
......@@ -1948,7 +1968,15 @@ static int cache_create(struct cache_args *ca, struct cache **result)
r = create_cache_policy(cache, ca, error);
if (r)
goto bad;
cache->policy_nr_args = ca->policy_argc;
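/*
 * The core default is set before the user-supplied key/value pairs are
 * applied, so an explicit migration_threshold on the table line
 * overrides DEFAULT_MIGRATION_THRESHOLD.
 */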
cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
if (r) {
*error = "Error setting cache policy's config values";
goto bad;
}
cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
ca->block_size, may_format,
......@@ -1967,10 +1995,10 @@ static int cache_create(struct cache_args *ca, struct cache **result)
INIT_LIST_HEAD(&cache->quiesced_migrations);
INIT_LIST_HEAD(&cache->completed_migrations);
INIT_LIST_HEAD(&cache->need_commit_migrations);
cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
atomic_set(&cache->nr_migrations, 0);
init_waitqueue_head(&cache->migration_wait);
r = -ENOMEM;
cache->nr_dirty = 0;
cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
if (!cache->dirty_bitset) {
......@@ -2517,23 +2545,6 @@ static void cache_status(struct dm_target *ti, status_type_t type,
DMEMIT("Error");
}
#define NOT_CORE_OPTION 1
static int process_config_option(struct cache *cache, char **argv)
{
unsigned long tmp;
if (!strcasecmp(argv[0], "migration_threshold")) {
if (kstrtoul(argv[1], 10, &tmp))
return -EINVAL;
cache->migration_threshold = tmp;
return 0;
}
return NOT_CORE_OPTION;
}
/*
* Supports <key> <value>.
*
......@@ -2541,17 +2552,12 @@ static int process_config_option(struct cache *cache, char **argv)
*/
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
int r;
struct cache *cache = ti->private;
if (argc != 2)
return -EINVAL;
r = process_config_option(cache, argv);
if (r == NOT_CORE_OPTION)
return policy_set_config_value(cache->policy, argv[0], argv[1]);
return r;
return set_config_value(cache, argv[0], argv[1]);
}
static int cache_iterate_devices(struct dm_target *ti,
......@@ -2609,7 +2615,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
.version = {1, 1, 0},
.version = {1, 1, 1},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
......
......@@ -907,6 +907,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;
return 0;
......
......@@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
if (!s->pending_pool) {
ti->error = "Could not allocate mempool for pending exceptions";
r = -ENOMEM;
goto bad_pending_pool;
}
......
......@@ -94,7 +94,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct stripe_c *sc;
sector_t width;
sector_t width, tmp_len;
uint32_t stripes;
uint32_t chunk_size;
int r;
......@@ -116,15 +116,16 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
width = ti->len;
if (sector_div(width, chunk_size)) {
if (sector_div(width, stripes)) {
ti->error = "Target length not divisible by "
"chunk size";
"number of stripes";
return -EINVAL;
}
if (sector_div(width, stripes)) {
tmp_len = width;
if (sector_div(tmp_len, chunk_size)) {
ti->error = "Target length not divisible by "
"number of stripes";
"chunk size";
return -EINVAL;
}
......
......@@ -1442,7 +1442,7 @@ static bool dm_table_supports_write_same(struct dm_table *t)
return false;
if (!ti->type->iterate_devices ||
!ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
return false;
}
......
......@@ -1645,12 +1645,12 @@ int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
return r;
}
static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
{
int r;
dm_block_t old_count;
r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count);
r = dm_sm_get_nr_blocks(sm, &old_count);
if (r)
return r;
......@@ -1658,11 +1658,11 @@ static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
return 0;
if (new_count < old_count) {
DMERR("cannot reduce size of data device");
DMERR("cannot reduce size of space map");
return -EINVAL;
}
return dm_sm_extend(pmd->data_sm, new_count - old_count);
return dm_sm_extend(sm, new_count - old_count);
}
int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
......@@ -1671,7 +1671,19 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
down_write(&pmd->root_lock);
if (!pmd->fail_io)
r = __resize_data_dev(pmd, new_count);
r = __resize_space_map(pmd->data_sm, new_count);
up_write(&pmd->root_lock);
return r;
}
int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
int r = -EINVAL;
down_write(&pmd->root_lock);
if (!pmd->fail_io)
r = __resize_space_map(pmd->metadata_sm, new_count);
up_write(&pmd->root_lock);
return r;
......@@ -1684,3 +1696,17 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
dm_bm_set_read_only(pmd->bm);
up_write(&pmd->root_lock);
}
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
dm_block_t threshold,
dm_sm_threshold_fn fn,
void *context)
{
int r;
down_write(&pmd->root_lock);
r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
up_write(&pmd->root_lock);
return r;
}
......@@ -8,6 +8,7 @@
#define DM_THIN_METADATA_H
#include "persistent-data/dm-block-manager.h"
#include "persistent-data/dm-space-map.h"
#define THIN_METADATA_BLOCK_SIZE 4096
......@@ -185,6 +186,7 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
* blocks would be lost.
*/
int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
/*
* Flicks the underlying block manager into read only mode, so you know
......@@ -192,6 +194,11 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
*/
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
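/*
 * Registers a callback to be invoked when the free space in the metadata
 * device drops to the given threshold (expressed in metadata blocks).
 */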
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
dm_block_t threshold,
dm_sm_threshold_fn fn,
void *context);
/*----------------------------------------------------------------*/
#endif
......@@ -922,7 +922,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
return r;
if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
DMWARN("%s: reached low water mark, sending event.",
DMWARN("%s: reached low water mark for data device: sending event.",
dm_device_name(pool->pool_md));
spin_lock_irqsave(&pool->lock, flags);
pool->low_water_triggered = 1;
......@@ -1281,6 +1281,10 @@ static void process_bio_fail(struct thin_c *tc, struct bio *bio)
bio_io_error(bio);
}
/*
* FIXME: should we also commit due to size of transaction, measured in
* metadata blocks?
*/
static int need_commit_due_to_time(struct pool *pool)
{
return jiffies < pool->last_commit_jiffies ||
......@@ -1909,6 +1913,56 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
return r;
}
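/*
 * Invoked via the metadata space map threshold registered in pool_ctr;
 * raise a dm event so userspace can grow the metadata device.
 */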
static void metadata_low_callback(void *context)
{
struct pool *pool = context;
DMWARN("%s: reached low water mark for metadata device: sending event.",
dm_device_name(pool->pool_md));
dm_table_event(pool->ti->table);
}
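/*
 * Metadata device size in sectors, warning and clamping if the device is
 * bigger than the metadata format can make use of.
 */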
static sector_t get_metadata_dev_size(struct block_device *bdev)
{
sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
char buffer[BDEVNAME_SIZE];
if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
}
return metadata_dev_size;
}
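/* As above, but in units of metadata blocks rather than sectors. */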
static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
{
sector_t metadata_dev_size = get_metadata_dev_size(bdev);
sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
return metadata_dev_size;
}
/*
* When a metadata threshold is crossed a dm event is triggered, and
* userland should respond by growing the metadata device. We could let
* userland set the threshold, like we do with the data threshold, but I'm
* not sure they know enough to do this well.
*/
static dm_block_t calc_metadata_threshold(struct pool_c *pt)
{
/*
* 4M is ample for all ops with the possible exception of thin
* device deletion which is harmless if it fails (just retry the
* delete after you've grown the device).
*/
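/*
 * For example, with 4k metadata blocks a 16M metadata device is 4096
 * blocks, so quarter = 1024 and the threshold is the full 4M; larger
 * devices stay capped at 4M, smaller ones get a quarter of their size.
 */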
dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
return min((dm_block_t)1024ULL /* 4M */, quarter);
}
/*
* thin-pool <metadata dev> <data dev>
* <data block size (sectors)>
......@@ -1931,8 +1985,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
unsigned long block_size;
dm_block_t low_water_blocks;
struct dm_dev *metadata_dev;
sector_t metadata_dev_size;
char b[BDEVNAME_SIZE];
fmode_t metadata_mode;
/*
* FIXME Remove validation from scope of lock.
......@@ -1944,19 +1997,32 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = -EINVAL;
goto out_unlock;
}
as.argc = argc;
as.argv = argv;
r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
/*
* Set default pool features.
*/
pool_features_init(&pf);
dm_consume_args(&as, 4);
r = parse_pool_features(&as, &pf, ti);
if (r)
goto out_unlock;
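/*
 * Only open the metadata device for writing if the pool has not been
 * requested read-only.
 */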
metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
if (r) {
ti->error = "Error opening metadata block device";
goto out_unlock;
}
metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
/*
* Run for the side-effect of possibly issuing a warning if the
* device is too big.
*/
(void) get_metadata_dev_size(metadata_dev->bdev);
r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
if (r) {
......@@ -1979,16 +2045,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto out;
}
/*
* Set default pool features.
*/
pool_features_init(&pf);
dm_consume_args(&as, 4);
r = parse_pool_features(&as, &pf, ti);
if (r)
goto out;
pt = kzalloc(sizeof(*pt), GFP_KERNEL);
if (!pt) {
r = -ENOMEM;
......@@ -2040,6 +2096,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
ti->private = pt;
r = dm_pool_register_metadata_threshold(pt->pool->pmd,
calc_metadata_threshold(pt),
metadata_low_callback,
pool);
if (r)
goto out_free_pt;
pt->callbacks.congested_fn = pool_is_congested;
dm_table_add_target_callbacks(ti->table, &pt->callbacks);
......@@ -2079,18 +2142,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
return r;
}
/*
* Retrieves the number of blocks of the data device from
* the superblock and compares it to the actual device size,
* thus resizing the data device in case it has grown.
*
* This both copes with opening preallocated data devices in the ctr
* being followed by a resume
* -and-
* calling the resume method individually after userspace has
* grown the data device in reaction to a table event.
*/
static int pool_preresume(struct dm_target *ti)
static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
{
int r;
struct pool_c *pt = ti->private;
......@@ -2098,12 +2150,7 @@ static int pool_preresume(struct dm_target *ti)
sector_t data_size = ti->len;
dm_block_t sb_data_size;
/*
* Take control of the pool object.
*/
r = bind_control_target(pool, ti);
if (r)
return r;
*need_commit = false;
(void) sector_div(data_size, pool->sectors_per_block);
......@@ -2114,7 +2161,7 @@ static int pool_preresume(struct dm_target *ti)
}
if (data_size < sb_data_size) {
DMERR("pool target too small, is %llu blocks (expected %llu)",
DMERR("pool target (%llu blocks) too small: expected %llu",
(unsigned long long)data_size, sb_data_size);
return -EINVAL;
......@@ -2122,17 +2169,90 @@ static int pool_preresume(struct dm_target *ti)
r = dm_pool_resize_data_dev(pool->pmd, data_size);
if (r) {
DMERR("failed to resize data device");
/* FIXME Stricter than necessary: Rollback transaction instead here */
set_pool_mode(pool, PM_READ_ONLY);
return r;
}
(void) commit_or_fallback(pool);
*need_commit = true;
}
return 0;
}
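/*
 * Counterpart of maybe_resize_data_dev() for the metadata device: compare
 * the actual device size with the size recorded in the superblock and
 * extend the metadata space map if the device has grown.
 */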
static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
{
int r;
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
dm_block_t metadata_dev_size, sb_metadata_dev_size;
*need_commit = false;
metadata_dev_size = get_metadata_dev_size(pool->md_dev);
r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
if (r) {
DMERR("failed to retrieve data device size");
return r;
}
if (metadata_dev_size < sb_metadata_dev_size) {
DMERR("metadata device (%llu sectors) too small: expected %llu",
metadata_dev_size, sb_metadata_dev_size);
return -EINVAL;
} else if (metadata_dev_size > sb_metadata_dev_size) {
r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
if (r) {
DMERR("failed to resize metadata device");
return r;
}
*need_commit = true;
}
return 0;
}
/*
* Retrieves the number of blocks of the data device from
* the superblock and compares it to the actual device size,
* thus resizing the data device in case it has grown.
*
* This both copes with opening preallocated data devices in the ctr
* being followed by a resume
* -and-
* calling the resume method individually after userspace has
* grown the data device in reaction to a table event.
*/