Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 72d711c8 authored by Mike Snitzer
Browse files

dm: adjust structure members to improve alignment



Eliminate most holes in DM data structures that were modified by
commit 6f1c819c ("dm: convert to bioset_init()/mempool_init()").
Also prevent structure members from unnecessarily spanning cache
lines.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent b2b04e7e
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -19,8 +19,8 @@

/*
 * Bio prison: tracks in-flight bios keyed by block, so that work on the
 * same block can be deferred/serialized.
 *
 * NOTE(review): the scraped diff showed `cell_pool` twice (old and new
 * position without +/- markers), which is invalid C.  Deduplicated here,
 * keeping the post-commit member order: `cell_pool` is placed last so the
 * hot members (`lock`, `cells`) sit together at the front of the struct
 * and the mempool does not push them onto separate cache lines.
 */
struct dm_bio_prison {
	spinlock_t lock;	/* protects cells */
	struct rb_root cells;	/* in-flight cells, keyed by block */
	mempool_t cell_pool;	/* allocator for cell structs */
};

static struct kmem_cache *_cell_cache;
+1 −1
Original line number Diff line number Diff line
@@ -21,8 +21,8 @@ struct dm_bio_prison_v2 {
	struct workqueue_struct *wq;

	spinlock_t lock;
	mempool_t cell_pool;
	struct rb_root cells;
	mempool_t cell_pool;
};

static struct kmem_cache *_cell_cache;
+32 −29
Original line number Diff line number Diff line
@@ -371,7 +371,13 @@ struct cache_stats {

struct cache {
	struct dm_target *ti;
	struct dm_target_callbacks callbacks;
	spinlock_t lock;

	/*
	 * Fields for converting from sectors to blocks.
	 */
	int sectors_per_block_shift;
	sector_t sectors_per_block;

	struct dm_cache_metadata *cmd;

@@ -402,13 +408,11 @@ struct cache {
	dm_cblock_t cache_size;

	/*
	 * Fields for converting from sectors to blocks.
	 * Invalidation fields.
	 */
	sector_t sectors_per_block;
	int sectors_per_block_shift;
	spinlock_t invalidation_lock;
	struct list_head invalidation_requests;

	spinlock_t lock;
	struct bio_list deferred_bios;
	sector_t migration_threshold;
	wait_queue_head_t migration_wait;
	atomic_t nr_allocated_migrations;
@@ -419,13 +423,11 @@ struct cache {
	 */
	atomic_t nr_io_migrations;

	struct bio_list deferred_bios;

	struct rw_semaphore quiesce_lock;

	/*
	 * cache_size entries, dirty if set
	 */
	atomic_t nr_dirty;
	unsigned long *dirty_bitset;
	struct dm_target_callbacks callbacks;

	/*
	 * origin_blocks entries, discarded if set.
@@ -442,17 +444,27 @@ struct cache {
	const char **ctr_args;

	struct dm_kcopyd_client *copier;
	struct workqueue_struct *wq;
	struct work_struct deferred_bio_worker;
	struct work_struct migration_worker;
	struct workqueue_struct *wq;
	struct delayed_work waker;
	struct dm_bio_prison_v2 *prison;
	struct bio_set bs;

	mempool_t migration_pool;
	/*
	 * cache_size entries, dirty if set
	 */
	unsigned long *dirty_bitset;
	atomic_t nr_dirty;

	struct dm_cache_policy *policy;
	unsigned policy_nr_args;
	struct dm_cache_policy *policy;

	/*
	 * Cache features such as write-through.
	 */
	struct cache_features features;

	struct cache_stats stats;

	bool need_tick_bio:1;
	bool sized:1;
@@ -461,25 +473,16 @@ struct cache {
	bool loaded_mappings:1;
	bool loaded_discards:1;

	/*
	 * Cache features such as write-through.
	 */
	struct cache_features features;

	struct cache_stats stats;
	struct rw_semaphore background_work_lock;

	/*
	 * Invalidation fields.
	 */
	spinlock_t invalidation_lock;
	struct list_head invalidation_requests;
	struct batcher committer;
	struct work_struct commit_ws;

	struct io_tracker tracker;

	struct work_struct commit_ws;
	struct batcher committer;
	mempool_t migration_pool;

	struct rw_semaphore background_work_lock;
	struct bio_set bs;
};

struct per_bio_data {
+19 −19
Original line number Diff line number Diff line
@@ -31,6 +31,9 @@ struct dm_kobject_holder {
struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
@@ -38,17 +41,14 @@ struct mapped_device {
	 */
	void __rcu *map;

	struct list_head table_devices;
	struct mutex table_devices_lock;

	unsigned long flags;

	struct request_queue *queue;
	int numa_node_id;

	enum dm_queue_mode type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;
@@ -56,21 +56,21 @@ struct mapped_device {
	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	wait_queue_head_t wait;
	atomic_t pending[2];
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
@@ -83,17 +83,17 @@ struct mapped_device {
	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * freeze/thaw support require holding onto a super block
	 */
@@ -102,11 +102,11 @@ struct mapped_device {
	/* forced geometry settings */
	struct hd_geometry geometry;

	struct block_device *bdev;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct block_device *bdev;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

+13 −13
Original line number Diff line number Diff line
@@ -139,25 +139,13 @@ struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests,
	 * encryption requests/buffer pages and integrity tags
	 */
	mempool_t req_pool;
	mempool_t page_pool;
	mempool_t tag_pool;
	unsigned tag_pool_max_sectors;

	struct percpu_counter n_allocated_pages;

	struct bio_set bs;
	struct mutex bio_alloc_lock;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	struct task_struct *write_thread;
	wait_queue_head_t write_thread_wait;
	struct task_struct *write_thread;
	struct rb_root write_tree;

	char *cipher;
@@ -213,6 +201,18 @@ struct crypt_config {
	unsigned int integrity_iv_size;
	unsigned int on_disk_tag_size;

	/*
	 * pool for per bio private data, crypto requests,
	 * encryption requests/buffer pages and integrity tags
	 */
	unsigned tag_pool_max_sectors;
	mempool_t tag_pool;
	mempool_t req_pool;
	mempool_t page_pool;

	struct bio_set bs;
	struct mutex bio_alloc_lock;

	u8 *authenc_key; /* space for keys in authenc() format (if used) */
	u8 key[0];
};
Loading