Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6beca5eb authored by Joe Thornber, committed by Alasdair G Kergon
Browse files

dm bio prison: pass cell memory in



Change the dm_bio_prison interface so that instead of allocating memory
internally, dm_bio_detain is supplied with a pre-allocated cell each
time it is called.

This enables a subsequent patch to move the allocation of the struct
dm_bio_prison_cell outside the thin target's mapping function so it can
no longer block there.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent 4e7f1f90
Loading
Loading
Loading
Loading
+72 −67
Original line number Original line Diff line number Diff line
@@ -16,7 +16,6 @@


struct dm_bio_prison_cell {
struct dm_bio_prison_cell {
	struct hlist_node list;
	struct hlist_node list;
	struct dm_bio_prison *prison;
	struct dm_cell_key key;
	struct dm_cell_key key;
	struct bio *holder;
	struct bio *holder;
	struct bio_list bios;
	struct bio_list bios;
@@ -87,6 +86,19 @@ void dm_bio_prison_destroy(struct dm_bio_prison *prison)
}
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);


/*
 * Allocate a cell from the prison's mempool.
 *
 * Like mempool_alloc(), this can only fail if called in interrupt
 * context or passed GFP_NOWAIT; with GFP_NOIO it may block but
 * will not fail.
 */
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
	return mempool_alloc(prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);

/*
 * Return a cell previously obtained from dm_bio_prison_alloc_cell()
 * to the prison's mempool.
 */
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell)
{
	mempool_free(cell, prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);

static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
{
{
	const unsigned long BIG_PRIME = 4294967291UL;
	const unsigned long BIG_PRIME = 4294967291UL;
@@ -114,91 +126,86 @@ static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
	return NULL;
	return NULL;
}
}


/*
static void __setup_new_cell(struct dm_bio_prison *prison,
 * This may block if a new cell needs allocating.  You must ensure that
			     struct dm_cell_key *key,
 * cells will be unlocked even if the calling thread is blocked.
			     struct bio *holder,
 *
			     uint32_t hash,
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
			     struct dm_bio_prison_cell *cell)
 */
int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
		  struct bio *inmate, struct dm_bio_prison_cell **ref)
{
{
	int r = 1;
	memcpy(&cell->key, key, sizeof(cell->key));
	unsigned long flags;
	cell->holder = holder;
	uint32_t hash = hash_key(prison, key);
	bio_list_init(&cell->bios);
	struct dm_bio_prison_cell *cell, *cell2;
	hlist_add_head(&cell->list, prison->cells + hash);

	BUG_ON(hash > prison->nr_buckets);

	spin_lock_irqsave(&prison->lock, flags);

	cell = __search_bucket(prison->cells + hash, key);
	if (cell) {
		bio_list_add(&cell->bios, inmate);
		goto out;
}
}


	/*
static int __bio_detain(struct dm_bio_prison *prison,
	 * Allocate a new cell
			struct dm_cell_key *key,
	 */
			struct bio *inmate,
	spin_unlock_irqrestore(&prison->lock, flags);
			struct dm_bio_prison_cell *cell_prealloc,
	cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
			struct dm_bio_prison_cell **cell_result)
	spin_lock_irqsave(&prison->lock, flags);
{
	uint32_t hash = hash_key(prison, key);
	struct dm_bio_prison_cell *cell;


	/*
	 * We've been unlocked, so we have to double check that
	 * nobody else has inserted this cell in the meantime.
	 */
	cell = __search_bucket(prison->cells + hash, key);
	cell = __search_bucket(prison->cells + hash, key);
	if (cell) {
	if (cell) {
		mempool_free(cell2, prison->cell_pool);
		if (inmate)
			bio_list_add(&cell->bios, inmate);
			bio_list_add(&cell->bios, inmate);
		goto out;
		*cell_result = cell;
		return 1;
	}
	}


	/*
	__setup_new_cell(prison, key, inmate, hash, cell_prealloc);
	 * Use new cell.
	*cell_result = cell_prealloc;
	 */
	return 0;
	cell = cell2;
}

	cell->prison = prison;
	memcpy(&cell->key, key, sizeof(cell->key));
	cell->holder = inmate;
	bio_list_init(&cell->bios);
	hlist_add_head(&cell->list, prison->cells + hash);


	r = 0;
static int bio_detain(struct dm_bio_prison *prison,
		      struct dm_cell_key *key,
		      struct bio *inmate,
		      struct dm_bio_prison_cell *cell_prealloc,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	unsigned long flags;


out:
	spin_lock_irqsave(&prison->lock, flags);
	r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);
	spin_unlock_irqrestore(&prison->lock, flags);


	*ref = cell;

	return r;
	return r;
}
}

/*
 * An atomic op that combines retrieving a cell and adding a bio to it.
 *
 * The caller supplies @cell_prealloc rather than having the prison
 * allocate internally.  Returns 1 if the cell was already held (the
 * prealloc is then unused and the caller must free it), 0 if @inmate
 * is the new holder.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);
EXPORT_SYMBOL_GPL(dm_bio_detain);


/*
/*
 * @inmates must have been initialised prior to this call
 * @inmates must have been initialised prior to this call
 */
 */
static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
static void __cell_release(struct dm_bio_prison_cell *cell,
			   struct bio_list *inmates)
{
{
	struct dm_bio_prison *prison = cell->prison;

	hlist_del(&cell->list);
	hlist_del(&cell->list);


	if (inmates) {
	if (inmates) {
		if (cell->holder)
			bio_list_add(inmates, cell->holder);
			bio_list_add(inmates, cell->holder);
		bio_list_merge(inmates, &cell->bios);
		bio_list_merge(inmates, &cell->bios);
	}
	}

	mempool_free(cell, prison->cell_pool);
}
}


void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios)
{
{
	unsigned long flags;
	unsigned long flags;
	struct dm_bio_prison *prison = cell->prison;


	spin_lock_irqsave(&prison->lock, flags);
	spin_lock_irqsave(&prison->lock, flags);
	__cell_release(cell, bios);
	__cell_release(cell, bios);
@@ -209,20 +216,18 @@ EXPORT_SYMBOL_GPL(dm_cell_release);
/*
/*
 * Sometimes we don't want the holder, just the additional bios.
 * Sometimes we don't want the holder, just the additional bios.
 */
 */
static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
				     struct bio_list *inmates)
{
{
	struct dm_bio_prison *prison = cell->prison;

	hlist_del(&cell->list);
	hlist_del(&cell->list);
	bio_list_merge(inmates, &cell->bios);
	bio_list_merge(inmates, &cell->bios);

	mempool_free(cell, prison->cell_pool);
}
}


void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates)
{
{
	unsigned long flags;
	unsigned long flags;
	struct dm_bio_prison *prison = cell->prison;


	spin_lock_irqsave(&prison->lock, flags);
	spin_lock_irqsave(&prison->lock, flags);
	__cell_release_no_holder(cell, inmates);
	__cell_release_no_holder(cell, inmates);
@@ -230,9 +235,9 @@ void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list
}
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);


void dm_cell_error(struct dm_bio_prison_cell *cell)
void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell)
{
{
	struct dm_bio_prison *prison = cell->prison;
	struct bio_list bios;
	struct bio_list bios;
	struct bio *bio;
	struct bio *bio;
	unsigned long flags;
	unsigned long flags;
+27 −8
Original line number Original line Diff line number Diff line
@@ -35,17 +35,36 @@ struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);


/*
/*
 * This may block if a new cell needs allocating.  You must ensure that
 * These two functions just wrap a mempool.  This is a transitory step:
 * cells will be unlocked even if the calling thread is blocked.
 * Eventually all bio prison clients should manage their own cell memory.
 *
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
 * in interrupt context or passed GFP_NOWAIT.
 */
 */
int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
		  struct bio *inmate, struct dm_bio_prison_cell **ref);
						    gfp_t gfp);
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell);


void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
/*
void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);
 * An atomic op that combines retrieving a cell, and adding a bio to it.
void dm_cell_error(struct dm_bio_prison_cell *cell);
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result);

void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios);
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates);
void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell);


/*----------------------------------------------------------------*/
/*----------------------------------------------------------------*/


+77 −26
Original line number Original line Diff line number Diff line
@@ -229,6 +229,54 @@ struct thin_c {


/*----------------------------------------------------------------*/
/*----------------------------------------------------------------*/


/*
 * Wrapper around dm_bio_detain() that manages cell memory on behalf
 * of the thin pool: it preallocates a cell from the prison's mempool
 * and frees it again if the bio ended up joining an existing cell.
 *
 * Returns the result of dm_bio_detain(): 1 if the cell was already
 * held, 0 if @bio is the new holder.
 */
static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

/*
 * Release all of a cell's bios into @bios, then return the cell
 * itself to the prison's mempool.
 */
static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

/*
 * As cell_release(), but omits the cell's holder bio from @bios
 * before returning the cell to the prison's mempool.
 */
static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

/*
 * Error all bios held in the cell, then return the cell to the
 * prison's mempool.
 */
static void cell_error(struct pool *pool,
		       struct dm_bio_prison_cell *cell)
{
	dm_cell_error(pool->prison, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

/*----------------------------------------------------------------*/

/*
/*
 * A global list of pools that uses a struct mapped_device as a key.
 * A global list of pools that uses a struct mapped_device as a key.
 */
 */
@@ -524,14 +572,14 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
	unsigned long flags;
	unsigned long flags;


	spin_lock_irqsave(&pool->lock, flags);
	spin_lock_irqsave(&pool->lock, flags);
	dm_cell_release(cell, &pool->deferred_bios);
	cell_release(pool, cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&tc->pool->lock, flags);
	spin_unlock_irqrestore(&tc->pool->lock, flags);


	wake_worker(pool);
	wake_worker(pool);
}
}


/*
/*
 * Same as cell_defer except it omits the original holder of the cell.
 * Same as cell_defer above, except it omits the original holder of the cell.
 */
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
{
@@ -539,7 +587,7 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
	unsigned long flags;
	unsigned long flags;


	spin_lock_irqsave(&pool->lock, flags);
	spin_lock_irqsave(&pool->lock, flags);
	dm_cell_release_no_holder(cell, &pool->deferred_bios);
	cell_release_no_holder(pool, cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);
	spin_unlock_irqrestore(&pool->lock, flags);


	wake_worker(pool);
	wake_worker(pool);
@@ -549,13 +597,14 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
{
	if (m->bio)
	if (m->bio)
		m->bio->bi_end_io = m->saved_bi_end_io;
		m->bio->bi_end_io = m->saved_bi_end_io;
	dm_cell_error(m->cell);
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
	mempool_free(m, m->tc->pool->mapping_pool);
}
}
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
{
	struct thin_c *tc = m->tc;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	struct bio *bio;
	int r;
	int r;


@@ -564,7 +613,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
		bio->bi_end_io = m->saved_bi_end_io;
		bio->bi_end_io = m->saved_bi_end_io;


	if (m->err) {
	if (m->err) {
		dm_cell_error(m->cell);
		cell_error(pool, m->cell);
		goto out;
		goto out;
	}
	}


@@ -576,7 +625,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
	if (r) {
		DMERR_LIMIT("dm_thin_insert_block() failed");
		DMERR_LIMIT("dm_thin_insert_block() failed");
		dm_cell_error(m->cell);
		cell_error(pool, m->cell);
		goto out;
		goto out;
	}
	}


@@ -594,7 +643,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)


out:
out:
	list_del(&m->list);
	list_del(&m->list);
	mempool_free(m, tc->pool->mapping_pool);
	mempool_free(m, pool->mapping_pool);
}
}


static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
@@ -745,7 +794,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
		if (r < 0) {
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			dm_cell_error(cell);
			cell_error(pool, cell);
		}
		}
	}
	}
}
}
@@ -811,7 +860,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
		if (r < 0) {
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_zero() failed");
			DMERR_LIMIT("dm_kcopyd_zero() failed");
			dm_cell_error(cell);
			cell_error(pool, cell);
		}
		}
	}
	}
}
}
@@ -917,13 +966,13 @@ static void retry_on_resume(struct bio *bio)
	spin_unlock_irqrestore(&pool->lock, flags);
	spin_unlock_irqrestore(&pool->lock, flags);
}
}


static void no_space(struct dm_bio_prison_cell *cell)
static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell)
{
{
	struct bio *bio;
	struct bio *bio;
	struct bio_list bios;
	struct bio_list bios;


	bio_list_init(&bios);
	bio_list_init(&bios);
	dm_cell_release(cell, &bios);
	cell_release(pool, cell, &bios);


	while ((bio = bio_list_pop(&bios)))
	while ((bio = bio_list_pop(&bios)))
		retry_on_resume(bio);
		retry_on_resume(bio);
@@ -941,7 +990,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
	struct dm_thin_new_mapping *m;
	struct dm_thin_new_mapping *m;


	build_virtual_key(tc->td, block, &key);
	build_virtual_key(tc->td, block, &key);
	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
	if (bio_detain(tc->pool, &key, bio, &cell))
		return;
		return;


	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -953,7 +1002,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
		 * on this block.
		 * on this block.
		 */
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		build_data_key(tc->td, lookup_result.block, &key2);
		if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
		if (bio_detain(tc->pool, &key2, bio, &cell2)) {
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell);
			break;
			break;
		}
		}
@@ -1029,13 +1078,13 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
		break;
		break;


	case -ENOSPC:
	case -ENOSPC:
		no_space(cell);
		no_space(tc->pool, cell);
		break;
		break;


	default:
	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
			    __func__, r);
		dm_cell_error(cell);
		cell_error(tc->pool, cell);
		break;
		break;
	}
	}
}
}
@@ -1053,7 +1102,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
	 * of being broken so we have nothing further to do here.
	 * of being broken so we have nothing further to do here.
	 */
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	build_data_key(tc->td, lookup_result->block, &key);
	if (dm_bio_detain(pool->prison, &key, bio, &cell))
	if (bio_detain(pool, &key, bio, &cell))
		return;
		return;


	if (bio_data_dir(bio) == WRITE && bio->bi_size)
	if (bio_data_dir(bio) == WRITE && bio->bi_size)
@@ -1074,12 +1123,13 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
{
{
	int r;
	int r;
	dm_block_t data_block;
	dm_block_t data_block;
	struct pool *pool = tc->pool;


	/*
	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	 */
	if (!bio->bi_size) {
	if (!bio->bi_size) {
		inc_all_io_entry(tc->pool, bio);
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);
		cell_defer_no_holder(tc, cell);


		remap_and_issue(tc, bio, 0);
		remap_and_issue(tc, bio, 0);
@@ -1106,14 +1156,14 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
		break;
		break;


	case -ENOSPC:
	case -ENOSPC:
		no_space(cell);
		no_space(pool, cell);
		break;
		break;


	default:
	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
			    __func__, r);
		set_pool_mode(tc->pool, PM_READ_ONLY);
		set_pool_mode(pool, PM_READ_ONLY);
		dm_cell_error(cell);
		cell_error(pool, cell);
		break;
		break;
	}
	}
}
}
@@ -1121,6 +1171,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
static void process_bio(struct thin_c *tc, struct bio *bio)
static void process_bio(struct thin_c *tc, struct bio *bio)
{
{
	int r;
	int r;
	struct pool *pool = tc->pool;
	dm_block_t block = get_bio_block(tc, bio);
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_bio_prison_cell *cell;
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;
	struct dm_cell_key key;
@@ -1131,7 +1182,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
	 * being provisioned so we have nothing further to do here.
	 * being provisioned so we have nothing further to do here.
	 */
	 */
	build_virtual_key(tc->td, block, &key);
	build_virtual_key(tc->td, block, &key);
	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
	if (bio_detain(pool, &key, bio, &cell))
		return;
		return;


	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1139,9 +1190,9 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
	case 0:
	case 0:
		if (lookup_result.shared) {
		if (lookup_result.shared) {
			process_shared_bio(tc, bio, block, &lookup_result);
			process_shared_bio(tc, bio, block, &lookup_result);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
		} else {
		} else {
			inc_all_io_entry(tc->pool, bio);
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell);


			remap_and_issue(tc, bio, lookup_result.block);
			remap_and_issue(tc, bio, lookup_result.block);
@@ -1150,7 +1201,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)


	case -ENODATA:
	case -ENODATA:
		if (bio_data_dir(bio) == READ && tc->origin_dev) {
		if (bio_data_dir(bio) == READ && tc->origin_dev) {
			inc_all_io_entry(tc->pool, bio);
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell);


			remap_to_origin_and_issue(tc, bio);
			remap_to_origin_and_issue(tc, bio);
@@ -1429,11 +1480,11 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
		}
		}


		build_virtual_key(tc->td, block, &key);
		build_virtual_key(tc->td, block, &key);
		if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
		if (bio_detain(tc->pool, &key, bio, &cell1))
			return DM_MAPIO_SUBMITTED;
			return DM_MAPIO_SUBMITTED;


		build_data_key(tc->td, result.block, &key);
		build_data_key(tc->td, result.block, &key);
		if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
		if (bio_detain(tc->pool, &key, bio, &cell2)) {
			cell_defer_no_holder(tc, cell1);
			cell_defer_no_holder(tc, cell1);
			return DM_MAPIO_SUBMITTED;
			return DM_MAPIO_SUBMITTED;
		}
		}