Commit 72d711c8 authored by Mike Snitzer

dm: adjust structure members to improve alignment

Eliminate most holes in DM data structures that were modified by
commit 6f1c819c ("dm: convert to bioset_init()/mempool_init()").
Also prevent structure members from unnecessarily spanning cache
lines.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent b2b04e7e
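For context, this is the classic struct-packing exercise: the compiler rounds each member up to the next multiple of its own alignment, so a 4-byte member (an int, or a spinlock_t without lockdep) followed by an 8-byte member (a pointer, a sector_t) leaves a 4-byte hole, and enough accumulated padding can also push a hot member across a 64-byte cache-line boundary. Reordering members so that same-size fields sit together removes the padding. The sketch below is illustrative only and is not taken from the commit; the struct and member names are invented.

#include <stdio.h>

/* Hypothetical layout on LP64: a 4-byte member between 8-byte
 * members forces a 4-byte hole plus 4 bytes of tail padding. */
struct before {
	void *a;		/* offset  0, size 8 */
	int b;			/* offset  8, size 4 */
				/* 4-byte hole: 'c' needs 8-byte alignment */
	unsigned long c;	/* offset 16, size 8 */
	int d;			/* offset 24, size 4 */
				/* 4 bytes of tail padding */
};				/* sizeof == 32 */

/* Same members, 8-byte fields first so the two ints share one
 * 8-byte slot: no holes, no tail padding. */
struct after {
	void *a;		/* offset  0, size 8 */
	unsigned long c;	/* offset  8, size 8 */
	int b;			/* offset 16, size 4 */
	int d;			/* offset 20, size 4 */
};				/* sizeof == 24 */

int main(void)
{
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct before), sizeof(struct after));
	return 0;
}

On a built object, pahole (from the dwarves package) prints each member's offset and size, flags every hole, and marks cache-line boundaries; running it over something like drivers/md/dm-cache-target.o before and after a reordering of this kind is the usual way to verify the resulting layout.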
drivers/md/dm-bio-prison-v1.c
@@ -19,8 +19,8 @@
 struct dm_bio_prison {
 	spinlock_t lock;
-	mempool_t cell_pool;
 	struct rb_root cells;
+	mempool_t cell_pool;
 };
 
 static struct kmem_cache *_cell_cache;
drivers/md/dm-bio-prison-v2.c
@@ -21,8 +21,8 @@ struct dm_bio_prison_v2 {
 	struct workqueue_struct *wq;
 	spinlock_t lock;
-	mempool_t cell_pool;
 	struct rb_root cells;
+	mempool_t cell_pool;
 };
 
 static struct kmem_cache *_cell_cache;
drivers/md/dm-cache-target.c
@@ -371,7 +371,13 @@ struct cache_stats {
 struct cache {
 	struct dm_target *ti;
-	struct dm_target_callbacks callbacks;
+	spinlock_t lock;
+
+	/*
+	 * Fields for converting from sectors to blocks.
+	 */
+	int sectors_per_block_shift;
+	sector_t sectors_per_block;
 
 	struct dm_cache_metadata *cmd;
@@ -402,13 +408,11 @@ struct cache {
 	dm_cblock_t cache_size;
 
 	/*
-	 * Fields for converting from sectors to blocks.
+	 * Invalidation fields.
 	 */
-	sector_t sectors_per_block;
-	int sectors_per_block_shift;
+	spinlock_t invalidation_lock;
+	struct list_head invalidation_requests;
 
-	spinlock_t lock;
-	struct bio_list deferred_bios;
 	sector_t migration_threshold;
 	wait_queue_head_t migration_wait;
 	atomic_t nr_allocated_migrations;
@@ -419,13 +423,11 @@ struct cache {
 	 */
 	atomic_t nr_io_migrations;
 
+	struct bio_list deferred_bios;
+
 	struct rw_semaphore quiesce_lock;
 
-	/*
-	 * cache_size entries, dirty if set
-	 */
-	atomic_t nr_dirty;
-	unsigned long *dirty_bitset;
+	struct dm_target_callbacks callbacks;
 
 	/*
 	 * origin_blocks entries, discarded if set.
@@ -442,17 +444,27 @@ struct cache {
 	const char **ctr_args;
 
 	struct dm_kcopyd_client *copier;
-	struct workqueue_struct *wq;
 	struct work_struct deferred_bio_worker;
 	struct work_struct migration_worker;
+	struct workqueue_struct *wq;
 	struct delayed_work waker;
 	struct dm_bio_prison_v2 *prison;
-	struct bio_set bs;
 
-	mempool_t migration_pool;
+	/*
+	 * cache_size entries, dirty if set
+	 */
+	unsigned long *dirty_bitset;
+	atomic_t nr_dirty;
 
-	struct dm_cache_policy *policy;
 	unsigned policy_nr_args;
+	struct dm_cache_policy *policy;
+
+	/*
+	 * Cache features such as write-through.
+	 */
+	struct cache_features features;
+
+	struct cache_stats stats;
 
 	bool need_tick_bio:1;
 	bool sized:1;
@@ -461,25 +473,16 @@ struct cache {
 	bool loaded_mappings:1;
 	bool loaded_discards:1;
 
-	/*
-	 * Cache features such as write-through.
-	 */
-	struct cache_features features;
-
-	struct cache_stats stats;
+	struct rw_semaphore background_work_lock;
 
-	/*
-	 * Invalidation fields.
-	 */
-	spinlock_t invalidation_lock;
-	struct list_head invalidation_requests;
+	struct batcher committer;
+	struct work_struct commit_ws;
 
 	struct io_tracker tracker;
 
-	struct work_struct commit_ws;
-	struct batcher committer;
+	mempool_t migration_pool;
 
-	struct rw_semaphore background_work_lock;
+	struct bio_set bs;
 };
 
 struct per_bio_data {
drivers/md/dm-core.h
@@ -31,6 +31,9 @@ struct dm_kobject_holder {
 struct mapped_device {
 	struct mutex suspend_lock;
 
+	struct mutex table_devices_lock;
+	struct list_head table_devices;
+
 	/*
 	 * The current mapping (struct dm_table *).
 	 * Use dm_get_live_table{_fast} or take suspend_lock for
@@ -38,17 +41,14 @@ struct mapped_device {
 	 */
 	void __rcu *map;
 
-	struct list_head table_devices;
-	struct mutex table_devices_lock;
-
 	unsigned long flags;
 
-	struct request_queue *queue;
-	int numa_node_id;
-
-	enum dm_queue_mode type;
 	/* Protect queue and type against concurrent access. */
 	struct mutex type_lock;
+	enum dm_queue_mode type;
+
+	int numa_node_id;
+	struct request_queue *queue;
 
 	atomic_t holders;
 	atomic_t open_count;
@@ -56,21 +56,21 @@ struct mapped_device {
 	struct dm_target *immutable_target;
 	struct target_type *immutable_target_type;
 
+	char name[16];
 	struct gendisk *disk;
 	struct dax_device *dax_dev;
-	char name[16];
 
-	void *interface_ptr;
-
 	/*
 	 * A list of ios that arrived while we were suspended.
 	 */
-	atomic_t pending[2];
-	wait_queue_head_t wait;
 	struct work_struct work;
+	wait_queue_head_t wait;
+	atomic_t pending[2];
 	spinlock_t deferred_lock;
 	struct bio_list deferred;
 
+	void *interface_ptr;
+
 	/*
 	 * Event handling.
 	 */
@@ -83,17 +83,17 @@ struct mapped_device {
 	/* the number of internal suspends */
 	unsigned internal_suspend_count;
 
-	/*
-	 * Processing queue (flush)
-	 */
-	struct workqueue_struct *wq;
-
 	/*
 	 * io objects are allocated from here.
 	 */
 	struct bio_set io_bs;
 
 	struct bio_set bs;
 
+	/*
+	 * Processing queue (flush)
+	 */
+	struct workqueue_struct *wq;
+
 	/*
 	 * freeze/thaw support require holding onto a super block
 	 */
@@ -102,11 +102,11 @@ struct mapped_device {
 	/* forced geometry settings */
 	struct hd_geometry geometry;
 
-	struct block_device *bdev;
-
 	/* kobject and completion */
 	struct dm_kobject_holder kobj_holder;
 
+	struct block_device *bdev;
+
 	/* zero-length flush that will be cloned and submitted to targets */
 	struct bio flush_bio;
drivers/md/dm-crypt.c
@@ -139,25 +139,13 @@ struct crypt_config {
 	struct dm_dev *dev;
 	sector_t start;
 
-	/*
-	 * pool for per bio private data, crypto requests,
-	 * encryption requeusts/buffer pages and integrity tags
-	 */
-	mempool_t req_pool;
-	mempool_t page_pool;
-	mempool_t tag_pool;
-	unsigned tag_pool_max_sectors;
-
 	struct percpu_counter n_allocated_pages;
 
-	struct bio_set bs;
-	struct mutex bio_alloc_lock;
-
 	struct workqueue_struct *io_queue;
 	struct workqueue_struct *crypt_queue;
 
-	struct task_struct *write_thread;
 	wait_queue_head_t write_thread_wait;
+	struct task_struct *write_thread;
 	struct rb_root write_tree;
 
 	char *cipher;
@@ -213,6 +201,18 @@ struct crypt_config {
 	unsigned int integrity_iv_size;
 	unsigned int on_disk_tag_size;
 
+	/*
+	 * pool for per bio private data, crypto requests,
+	 * encryption requeusts/buffer pages and integrity tags
+	 */
+	unsigned tag_pool_max_sectors;
+	mempool_t tag_pool;
+	mempool_t req_pool;
+	mempool_t page_pool;
+
+	struct bio_set bs;
+	struct mutex bio_alloc_lock;
+
 	u8 *authenc_key; /* space for keys in authenc() format (if used) */
 	u8 key[0];
 };
drivers/md/dm-kcopyd.c
@@ -45,7 +45,6 @@ struct dm_kcopyd_client {
 	struct dm_io_client *io_client;
 
 	wait_queue_head_t destroyq;
-	atomic_t nr_jobs;
 
 	mempool_t job_pool;
 
@@ -54,6 +53,8 @@ struct dm_kcopyd_client {
 
 	struct dm_kcopyd_throttle *throttle;
 
+	atomic_t nr_jobs;
+
 	/*
 	 * We maintain three lists of jobs:
 	 *
drivers/md/dm-region-hash.c
@@ -63,27 +63,28 @@ struct dm_region_hash {
 	/* hash table */
 	rwlock_t hash_lock;
-	mempool_t region_pool;
 	unsigned mask;
 	unsigned nr_buckets;
 	unsigned prime;
 	unsigned shift;
 	struct list_head *buckets;
 
+	/*
+	 * If there was a flush failure no regions can be marked clean.
+	 */
+	int flush_failure;
+
 	unsigned max_recovery; /* Max # of regions to recover in parallel */
 
 	spinlock_t region_lock;
 	atomic_t recovery_in_flight;
-	struct semaphore recovery_count;
 	struct list_head clean_regions;
 	struct list_head quiesced_regions;
 	struct list_head recovered_regions;
 	struct list_head failed_recovered_regions;
+	struct semaphore recovery_count;
 
-	/*
-	 * If there was a flush failure no regions can be marked clean.
-	 */
-	int flush_failure;
-
+	mempool_t region_pool;
+
 	void *context;
 	sector_t target_begin;
drivers/md/dm-thin.c
@@ -240,9 +240,9 @@ struct pool {
 	struct dm_bio_prison *prison;
 	struct dm_kcopyd_client *copier;
 
-	struct work_struct worker;
 	struct workqueue_struct *wq;
 	struct throttle throttle;
+	struct work_struct worker;
 	struct delayed_work waker;
 	struct delayed_work no_space_timeout;
@@ -260,7 +260,6 @@ struct pool {
 	struct dm_deferred_set *all_io_ds;
 
 	struct dm_thin_new_mapping *next_mapping;
-	mempool_t mapping_pool;
 
 	process_bio_fn process_bio;
 	process_bio_fn process_discard;
@@ -273,6 +272,8 @@ struct pool {
 	process_mapping_fn process_prepared_discard_pt2;
 
 	struct dm_bio_prison_cell **cell_sort_array;
+
+	mempool_t mapping_pool;
 };
 
 static enum pool_mode get_pool_mode(struct pool *pool);
drivers/md/dm-zoned-target.c
@@ -52,9 +52,9 @@ struct dmz_target {
 	struct dmz_reclaim *reclaim;
 
 	/* For chunk work */
-	struct mutex chunk_lock;
 	struct radix_tree_root chunk_rxtree;
 	struct workqueue_struct *chunk_wq;
+	struct mutex chunk_lock;
 
 	/* For cloned BIOs to zones */
 	struct bio_set bio_set;