
Commit 4281808f authored by Lars Ellenberg, committed by Philipp Reisner

drbd: add page pool to be used for meta data IO

parent 0e8488ad
drbd_int.h  +22 −1
@@ -1496,11 +1496,32 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
 extern mempool_t *drbd_request_mempool;
 extern mempool_t *drbd_ee_mempool;
 
-extern struct page *drbd_pp_pool; /* drbd's page pool */
+/* drbd's page pool, used to buffer data received from the peer,
+ * or data requested by the peer.
+ *
+ * This does not have an emergency reserve.
+ *
+ * When allocating from this pool, it first takes pages from the pool.
+ * Only if the pool is depleted will it try to allocate from the system.
+ *
+ * The assumption is that pages taken from this pool will be processed,
+ * and given back, "quickly", and then can be recycled, so we can avoid
+ * frequent calls to alloc_page(), and still will be able to make progress even
+ * under memory pressure.
+ */
+extern struct page *drbd_pp_pool;
 extern spinlock_t   drbd_pp_lock;
 extern int	    drbd_pp_vacant;
 extern wait_queue_head_t drbd_pp_wait;
 
+/* We also need a standard (emergency-reserve backed) page pool
+ * for meta data IO (activity log, bitmap).
+ * We can keep it global, as long as it is used as "N pages at a time".
+ * 128 should be plenty; currently we could probably get away with as few as 1.
+ */
+#define DRBD_MIN_POOL_PAGES	128
+extern mempool_t *drbd_md_io_page_pool;
+
 extern rwlock_t global_state_lock;
 
 extern struct drbd_conf *drbd_new_device(unsigned int minor);
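[Editor's note: as a rough illustration of the "N pages at a time" usage the new comment anticipates, a caller on the meta data IO path might look like the sketch below. This is a hedged sketch only; example_md_io is a hypothetical name, not a function from this commit.]

	/* illustrative sketch, not from this commit */
	static int example_md_io(void)
	{
		/* GFP_NOIO: we are on the IO path; backed by the emergency
		 * reserve, this allocation waits rather than fails. */
		struct page *page = mempool_alloc(drbd_md_io_page_pool, GFP_NOIO);
		int err = 0;

		/* ... fill the page, attach it to a bio, submit and wait ... */

		mempool_free(page, drbd_md_io_page_pool);
		return err;
	}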
drbd_main.c  +9 −0
@@ -139,6 +139,7 @@ struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
 struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
 mempool_t *drbd_request_mempool;
 mempool_t *drbd_ee_mempool;
+mempool_t *drbd_md_io_page_pool;
 
 /* I do not use a standard mempool, because:
    1) I want to hand out the pre-allocated objects first.
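[Editor's note: the "hand out the pre-allocated objects first" behaviour this comment refers to works roughly like the following sketch. Illustrative only; example_pp_alloc is a made-up name, and this assumes spare pages are linked through page->private as a simple free list.]

	/* illustrative sketch, not from this commit */
	static struct page *example_pp_alloc(gfp_t gfp_mask)
	{
		struct page *page = NULL;

		spin_lock(&drbd_pp_lock);
		if (drbd_pp_pool) {
			/* pop one pre-allocated page off the free list */
			page = drbd_pp_pool;
			drbd_pp_pool = (struct page *)page_private(page);
			set_page_private(page, 0);
			drbd_pp_vacant--;
		}
		spin_unlock(&drbd_pp_lock);

		if (!page)	/* pool depleted: fall back to the system */
			page = alloc_page(gfp_mask);
		return page;
	}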
@@ -3264,6 +3265,8 @@ static void drbd_destroy_mempools(void)
 
 	/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
 
+	if (drbd_md_io_page_pool)
+		mempool_destroy(drbd_md_io_page_pool);
 	if (drbd_ee_mempool)
 		mempool_destroy(drbd_ee_mempool);
 	if (drbd_request_mempool)
@@ -3277,6 +3280,7 @@ static void drbd_destroy_mempools(void)
 	if (drbd_al_ext_cache)
 		kmem_cache_destroy(drbd_al_ext_cache);
 
+	drbd_md_io_page_pool = NULL;
 	drbd_ee_mempool      = NULL;
 	drbd_request_mempool = NULL;
 	drbd_ee_cache        = NULL;
@@ -3300,6 +3304,7 @@ static int drbd_create_mempools(void)
 	drbd_bm_ext_cache    = NULL;
 	drbd_al_ext_cache    = NULL;
 	drbd_pp_pool         = NULL;
+	drbd_md_io_page_pool = NULL;
 
 	/* caches */
 	drbd_request_cache = kmem_cache_create(
@@ -3323,6 +3328,10 @@ static int drbd_create_mempools(void)
 		goto Enomem;
 
 	/* mempools */
+	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
+	if (drbd_md_io_page_pool == NULL)
+		goto Enomem;
+
 	drbd_request_mempool = mempool_create(number,
 		mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
 	if (drbd_request_mempool == NULL)
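
[Editor's note: mempool_create_page_pool(min_nr, order) builds a mempool on top of alloc_pages()/__free_pages() of the given order; order 0 here means single pages. With DRBD_MIN_POOL_PAGES (128) pages pre-allocated as the emergency reserve, a mempool_alloc(drbd_md_io_page_pool, GFP_NOIO) either gets a fresh page from the allocator or sleeps until a reserved page is returned, so meta data IO can keep making progress under memory pressure instead of failing.]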