
Commit 588726d3 authored by Javier González, committed by Jens Axboe

lightnvm: pblk: fail gracefully on irrec. error



Because user writes are decoupled from media writes by an intermediate
write buffer, irrecoverable media write errors lead to pblk stalling:
user writes fill up the write buffer and end up in an infinite retry
loop.

In order to let user writes fail gracefully, it is necessary for pblk to
keep track of its own internal state and prevent further writes from
being placed into the write buffer.

This patch implements a state machine to keep track of internal errors
and, in case of failure, fail further user writes in a standard way.
Depending on the type of error, pblk will do its best to persist
buffered writes (which are already acknowledged) and shut down in a
graceful manner. This way, data might be recovered by re-instantiating
pblk. This state machine also paves the way for a state-based FTL log.

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ef576494
+7 −1
@@ -31,9 +31,13 @@ int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
 	 */
 retry:
 	ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
-	if (ret == NVM_IO_REQUEUE) {
+	switch (ret) {
+	case NVM_IO_REQUEUE:
 		io_schedule();
 		goto retry;
+	case NVM_IO_ERR:
+		pblk_pipeline_stop(pblk);
+		goto out;
 	}
 
 	if (unlikely(!bio_has_data(bio)))
@@ -58,6 +62,8 @@ int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
 	atomic_long_add(nr_entries, &pblk->req_writes);
 #endif
 
+	pblk_rl_inserted(&pblk->rl, nr_entries);
+
 out:
 	pblk_write_should_kick(pblk);
 	return ret;
+199 −81
@@ -53,6 +53,8 @@ static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
 		*ppa = rqd->ppa_addr;
 		pblk_mark_bb(pblk, line, ppa);
 	}
+
+	atomic_dec(&pblk->inflight_io);
 }
 
 /* Erase completion assumes that only one block is erased at the time */
@@ -257,35 +259,25 @@ void pblk_end_io_sync(struct nvm_rq *rqd)
 	complete(waiting);
 }
 
-void pblk_flush_writer(struct pblk *pblk)
+void pblk_wait_for_meta(struct pblk *pblk)
 {
-	struct bio *bio;
-	int ret;
-	DECLARE_COMPLETION_ONSTACK(wait);
-
-	bio = bio_alloc(GFP_KERNEL, 1);
-	if (!bio)
-		return;
-
-	bio->bi_iter.bi_sector = 0; /* internal bio */
-	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_OP_FLUSH);
-	bio->bi_private = &wait;
-	bio->bi_end_io = pblk_end_bio_sync;
+	do {
+		if (!atomic_read(&pblk->inflight_io))
+			break;
 
-	ret = pblk_write_to_cache(pblk, bio, 0);
-	if (ret == NVM_IO_OK) {
-		if (!wait_for_completion_io_timeout(&wait,
-				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
-			pr_err("pblk: flush cache timed out\n");
-		}
-	} else if (ret != NVM_IO_DONE) {
-		pr_err("pblk: tear down bio failed\n");
-	}
+		schedule();
+	} while (1);
+}
 
-	if (bio->bi_status)
-		pr_err("pblk: flush sync write failed (%u)\n", bio->bi_status);
+static void pblk_flush_writer(struct pblk *pblk)
+{
+	pblk_rb_flush(&pblk->rwb);
+	do {
+		if (!pblk_rb_read_count(&pblk->rwb))
+			break;
 
-	bio_put(bio);
+		schedule();
+	} while (1);
 }
 
 struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
@@ -425,6 +417,9 @@ int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
 		}
 	}
 #endif
+
+	atomic_inc(&pblk->inflight_io);
+
 	return nvm_submit_io(dev, rqd);
 }
 
@@ -676,6 +671,7 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
 				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
 		pr_err("pblk: emeta I/O timed out\n");
 	}
+	atomic_dec(&pblk->inflight_io);
 	reinit_completion(&wait);
 
 	if (likely(pblk->l_mg.emeta_alloc_type == PBLK_VMALLOC_META))
@@ -791,6 +787,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
 				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
 		pr_err("pblk: smeta I/O timed out\n");
 	}
+	atomic_dec(&pblk->inflight_io);
 
 	if (rqd.error) {
 		if (dir == WRITE)
@@ -832,7 +829,7 @@ static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
 static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
 {
 	struct nvm_rq rqd;
-	int ret;
+	int ret = 0;
 	DECLARE_COMPLETION_ONSTACK(wait);
 
 	memset(&rqd, 0, sizeof(struct nvm_rq));
@@ -867,14 +864,14 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
 	rqd.private = pblk;
 	__pblk_end_io_erase(pblk, &rqd);
 
-	return 0;
+	return ret;
 }
 
 int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
 {
 	struct pblk_line_meta *lm = &pblk->lm;
 	struct ppa_addr ppa;
-	int bit = -1;
+	int ret, bit = -1;
 
 	/* Erase only good blocks, one at a time */
 	do {
@@ -893,9 +890,10 @@ int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
 		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
 		spin_unlock(&line->lock);
 
-		if (pblk_blk_erase_sync(pblk, ppa)) {
+		ret = pblk_blk_erase_sync(pblk, ppa);
+		if (ret) {
 			pr_err("pblk: failed to erase line %d\n", line->id);
-			return -ENOMEM;
+			return ret;
 		}
 	} while (1);
 
@@ -908,6 +906,8 @@ static void pblk_line_setup_metadata(struct pblk_line *line,
 {
 	int meta_line;
 
+	lockdep_assert_held(&l_mg->free_lock);
+
 retry_meta:
 	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
 	if (meta_line == PBLK_DATA_LINES) {
@@ -1039,7 +1039,6 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
 	/* Mark smeta metadata sectors as bad sectors */
 	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
 	off = bit * geo->sec_per_pl;
-retry_smeta:
 	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
 	line->sec_in_line -= lm->smeta_sec;
 	line->smeta_ssec = off;
@@ -1047,8 +1046,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
 
 	if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
 		pr_debug("pblk: line smeta I/O failed. Retry\n");
-		off += geo->sec_per_pl;
-		goto retry_smeta;
+		return 1;
 	}
 
 	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
@@ -1110,10 +1108,14 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
 
 	spin_lock(&line->lock);
 	if (line->state != PBLK_LINESTATE_FREE) {
+		mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
+		mempool_free(line->map_bitmap, pblk->line_meta_pool);
 		spin_unlock(&line->lock);
-		WARN(1, "pblk: corrupted line state\n");
-		return -EINTR;
+		WARN(1, "pblk: corrupted line %d, state %d\n",
+							line->id, line->state);
+		return -EAGAIN;
 	}
+
 	line->state = PBLK_LINESTATE_OPEN;
 
 	atomic_set(&line->left_eblks, blk_in_line);
@@ -1169,15 +1171,15 @@ struct pblk_line *pblk_line_get(struct pblk *pblk)
 {
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_line_meta *lm = &pblk->lm;
-	struct pblk_line *line = NULL;
-	int bit;
+	struct pblk_line *line;
+	int ret, bit;
 
 	lockdep_assert_held(&l_mg->free_lock);
 
-retry_get:
+retry:
 	if (list_empty(&l_mg->free_list)) {
 		pr_err("pblk: no free lines\n");
-		goto out;
+		return NULL;
 	}
 
 	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
@@ -1193,17 +1195,22 @@ struct pblk_line *pblk_line_get(struct pblk *pblk)
 		list_add_tail(&line->list, &l_mg->bad_list);
 
 		pr_debug("pblk: line %d is bad\n", line->id);
-		goto retry_get;
+		goto retry;
 	}
 
-	if (pblk_line_prepare(pblk, line)) {
-		pr_err("pblk: failed to prepare line %d\n", line->id);
-		list_add(&line->list, &l_mg->free_list);
-		l_mg->nr_free_lines++;
-		return NULL;
+	ret = pblk_line_prepare(pblk, line);
+	if (ret) {
+		if (ret == -EAGAIN) {
+			list_add(&line->list, &l_mg->corrupt_list);
+			goto retry;
+		} else {
+			pr_err("pblk: failed to prepare line %d\n", line->id);
+			list_add(&line->list, &l_mg->free_list);
+			l_mg->nr_free_lines++;
+			return NULL;
+		}
 	}
 
-out:
 	return line;
 }
 
@@ -1213,6 +1220,7 @@ static struct pblk_line *pblk_line_retry(struct pblk *pblk,
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_line *retry_line;
 
+retry:
 	spin_lock(&l_mg->free_lock);
 	retry_line = pblk_line_get(pblk);
 	if (!retry_line) {
@@ -1229,18 +1237,21 @@ static struct pblk_line *pblk_line_retry(struct pblk *pblk,
 	l_mg->data_line = retry_line;
 	spin_unlock(&l_mg->free_lock);
 
-	if (pblk_line_erase(pblk, retry_line)) {
-		spin_lock(&l_mg->free_lock);
-		l_mg->data_line = NULL;
-		spin_unlock(&l_mg->free_lock);
-		return NULL;
-	}
-
 	pblk_rl_free_lines_dec(&pblk->rl, retry_line);
 
+	if (pblk_line_erase(pblk, retry_line))
+		goto retry;
+
 	return retry_line;
 }
 
+static void pblk_set_space_limit(struct pblk *pblk)
+{
+	struct pblk_rl *rl = &pblk->rl;
+
+	atomic_set(&rl->rb_space, 0);
+}
+
 struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
 {
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
@@ -1262,20 +1273,31 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
 
 	/* Allocate next line for preparation */
 	l_mg->data_next = pblk_line_get(pblk);
-	if (l_mg->data_next) {
+	if (!l_mg->data_next) {
+		/* If we cannot get a new line, we need to stop the pipeline.
+		 * Only allow as many writes in as we can store safely and then
+		 * fail gracefully
+		 */
+		pblk_set_space_limit(pblk);
+
+		l_mg->data_next = NULL;
+	} else {
 		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
 		l_mg->data_next->type = PBLK_LINETYPE_DATA;
 		is_next = 1;
 	}
 	spin_unlock(&l_mg->free_lock);
 
+	if (pblk_line_erase(pblk, line)) {
+		line = pblk_line_retry(pblk, line);
+		if (!line)
+			return NULL;
+	}
+
 	pblk_rl_free_lines_dec(&pblk->rl, line);
 	if (is_next)
 		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
 
-	if (pblk_line_erase(pblk, line))
-		return NULL;
-
 retry_setup:
 	if (!pblk_line_init_metadata(pblk, line, NULL)) {
 		line = pblk_line_retry(pblk, line);
@@ -1296,7 +1318,47 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
 	return line;
 }
 
-struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
+static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
+{
+	lockdep_assert_held(&pblk->l_mg.free_lock);
+
+	pblk_set_space_limit(pblk);
+	pblk->state = PBLK_STATE_STOPPING;
+}
+
+void pblk_pipeline_stop(struct pblk *pblk)
+{
+	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+	int ret;
+
+	spin_lock(&l_mg->free_lock);
+	if (pblk->state == PBLK_STATE_RECOVERING ||
+					pblk->state == PBLK_STATE_STOPPED) {
+		spin_unlock(&l_mg->free_lock);
+		return;
+	}
+	pblk->state = PBLK_STATE_RECOVERING;
+	spin_unlock(&l_mg->free_lock);
+
+	pblk_flush_writer(pblk);
+	pblk_wait_for_meta(pblk);
+
+	ret = pblk_recov_pad(pblk);
+	if (ret) {
+		pr_err("pblk: could not close data on teardown(%d)\n", ret);
+		return;
+	}
+
+	pblk_line_close_meta_sync(pblk);
+
+	spin_lock(&l_mg->free_lock);
+	pblk->state = PBLK_STATE_STOPPED;
+	l_mg->data_line = NULL;
+	l_mg->data_next = NULL;
+	spin_unlock(&l_mg->free_lock);
+}
+
+void pblk_line_replace_data(struct pblk *pblk)
 {
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_line *cur, *new;
@@ -1306,42 +1368,38 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
 	cur = l_mg->data_line;
 	new = l_mg->data_next;
 	if (!new)
-		return NULL;
+		return;
 	l_mg->data_line = new;
 
-retry_line:
+	spin_lock(&l_mg->free_lock);
+	if (pblk->state != PBLK_STATE_RUNNING) {
+		l_mg->data_line = NULL;
+		l_mg->data_next = NULL;
+		spin_unlock(&l_mg->free_lock);
+		return;
+	}
+
+	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
+	spin_unlock(&l_mg->free_lock);
+
+retry_erase:
 	left_seblks = atomic_read(&new->left_seblks);
 	if (left_seblks) {
 		/* If line is not fully erased, erase it */
 		if (atomic_read(&new->left_eblks)) {
 			if (pblk_line_erase(pblk, new))
-				return NULL;
+				return;
 		} else {
 			io_schedule();
 		}
-		goto retry_line;
+		goto retry_erase;
 	}
 
-	spin_lock(&l_mg->free_lock);
-	/* Allocate next line for preparation */
-	l_mg->data_next = pblk_line_get(pblk);
-	if (l_mg->data_next) {
-		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
-		l_mg->data_next->type = PBLK_LINETYPE_DATA;
-		is_next = 1;
-	}
-
-	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
-	spin_unlock(&l_mg->free_lock);
-
-	if (is_next)
-		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
-
 retry_setup:
 	if (!pblk_line_init_metadata(pblk, new, cur)) {
 		new = pblk_line_retry(pblk, new);
 		if (!new)
-			return NULL;
+			return;
 
 		goto retry_setup;
 	}
@@ -1349,12 +1407,30 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
 	if (!pblk_line_init_bb(pblk, new, 1)) {
 		new = pblk_line_retry(pblk, new);
 		if (!new)
-			return NULL;
+			return;
 
 		goto retry_setup;
 	}
 
-	return new;
+	/* Allocate next line for preparation */
+	spin_lock(&l_mg->free_lock);
+	l_mg->data_next = pblk_line_get(pblk);
+	if (!l_mg->data_next) {
+		/* If we cannot get a new line, we need to stop the pipeline.
+		 * Only allow as many writes in as we can store safely and then
+		 * fail gracefully
+		 */
+		pblk_stop_writes(pblk, new);
+		l_mg->data_next = NULL;
+	} else {
+		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
+		l_mg->data_next->type = PBLK_LINETYPE_DATA;
+		is_next = 1;
+	}
+	spin_unlock(&l_mg->free_lock);
+
+	if (is_next)
+		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
 }
 
 void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
@@ -1438,6 +1514,46 @@ int pblk_line_is_full(struct pblk_line *line)
 	return (line->left_msecs == 0);
 }
 
+void pblk_line_close_meta_sync(struct pblk *pblk)
+{
+	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+	struct pblk_line_meta *lm = &pblk->lm;
+	struct pblk_line *line, *tline;
+	LIST_HEAD(list);
+
+	spin_lock(&l_mg->close_lock);
+	if (list_empty(&l_mg->emeta_list)) {
+		spin_unlock(&l_mg->close_lock);
+		return;
+	}
+
+	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
+	spin_unlock(&l_mg->close_lock);
+
+	list_for_each_entry_safe(line, tline, &list, list) {
+		struct pblk_emeta *emeta = line->emeta;
+
+		while (emeta->mem < lm->emeta_len[0]) {
+			int ret;
+
+			ret = pblk_submit_meta_io(pblk, line);
+			if (ret) {
+				pr_err("pblk: sync meta line %d failed (%d)\n",
+							line->id, ret);
+				return;
+			}
+		}
+	}
+
+	pblk_wait_for_meta(pblk);
+}
+
+static void pblk_line_should_sync_meta(struct pblk *pblk)
+{
+	if (pblk_rl_is_limit(&pblk->rl))
+		pblk_line_close_meta_sync(pblk);
+}
+
 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
 {
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
@@ -1477,7 +1593,7 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
 	struct pblk_emeta *emeta = line->emeta;
 	struct line_emeta *emeta_buf = emeta->buf;
 
-	/* No need for exact vsc value; avoid a big line lock and tak aprox. */
+	/* No need for exact vsc value; avoid a big line lock and take aprox. */
 	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
 	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
 
@@ -1489,6 +1605,8 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
 	list_add_tail(&line->list, &l_mg->emeta_list);
 	spin_unlock(&line->lock);
 	spin_unlock(&l_mg->close_lock);
+
+	pblk_line_should_sync_meta(pblk);
 }
 
 void pblk_line_close_ws(struct work_struct *work)
+4 −2
@@ -372,11 +372,13 @@ static void pblk_line_meta_free(struct pblk *pblk)
 	kfree(l_mg->bb_aux);
 	kfree(l_mg->vsc_list);
 
+	spin_lock(&l_mg->free_lock);
 	for (i = 0; i < PBLK_DATA_LINES; i++) {
 		kfree(l_mg->sline_meta[i]);
 		pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
 		kfree(l_mg->eline_meta[i]);
 	}
+	spin_unlock(&l_mg->free_lock);
 
 	kfree(pblk->lines);
 }
@@ -859,10 +861,9 @@ static void pblk_free(struct pblk *pblk)
 
 static void pblk_tear_down(struct pblk *pblk)
 {
-	pblk_flush_writer(pblk);
+	pblk_pipeline_stop(pblk);
 	pblk_writer_stop(pblk);
 	pblk_rb_sync_l2p(&pblk->rwb);
-	pblk_recov_pad(pblk);
 	pblk_rwb_free(pblk);
 	pblk_rl_free(&pblk->rl);
 
@@ -908,6 +909,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
 
 	pblk->dev = dev;
 	pblk->disk = tdisk;
+	pblk->state = PBLK_STATE_RUNNING;
 
 	spin_lock_init(&pblk->trans_lock);
 	spin_lock_init(&pblk->lock);
+17 −6
@@ -62,9 +62,8 @@ static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
 
 	if (pblk_line_is_full(line)) {
 		struct pblk_line *prev_line = line;
-		line = pblk_line_replace_data(pblk);
-		if (!line)
-			return;
+
+		pblk_line_replace_data(pblk);
 		pblk_line_close_meta(pblk, prev_line);
 	}
 
@@ -106,10 +105,16 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
 		pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
 					lun_bitmap, &meta_list[i], map_secs);
 
-		/* line can change after page map */
-		e_line = pblk_line_get_erase(pblk);
 		erase_lun = pblk_ppa_to_pos(geo, rqd->ppa_list[i]);
 
+		/* line can change after page map. We might also be writing the
+		 * last line.
+		 */
+		e_line = pblk_line_get_erase(pblk);
+		if (!e_line)
+			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
+							valid_secs, i + min);
+
 		spin_lock(&e_line->lock);
 		if (!test_bit(erase_lun, e_line->erase_bitmap)) {
 			set_bit(erase_lun, e_line->erase_bitmap);
@@ -127,9 +132,15 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
 		spin_unlock(&e_line->lock);
 	}
 
-	e_line = pblk_line_get_erase(pblk);
 	d_line = pblk_line_get_data(pblk);
 
+	/* line can change after page map. We might also be writing the
+	 * last line.
+	 */
+	e_line = pblk_line_get_erase(pblk);
+	if (!e_line)
+		return;
+
 	/* Erase blocks that are bad in this line but might not be in next */
 	if (unlikely(ppa_empty(*erase_ppa)) &&
 			bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
+20 −5
@@ -369,6 +369,9 @@ static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
 	/* Protect syncs */
 	smp_store_release(&rb->sync_point, sync_point);
 
+	if (!bio)
+		return 0;
+
 	spin_lock_irq(&rb->s_lock);
 	bio_list_add(&entry->w_ctx.bios, bio);
 	spin_unlock_irq(&rb->s_lock);
@@ -407,6 +410,17 @@ static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
 	return 1;
 }
 
+void pblk_rb_flush(struct pblk_rb *rb)
+{
+	struct pblk *pblk = container_of(rb, struct pblk, rwb);
+	unsigned int mem = READ_ONCE(rb->mem);
+
+	if (pblk_rb_sync_point_set(rb, NULL, mem))
+		return;
+
+	pblk_write_should_kick(pblk);
+}
+
 static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
 				   unsigned int *pos, struct bio *bio,
 				   int *io_ret)
@@ -443,15 +457,16 @@ int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
 			   unsigned int nr_entries, unsigned int *pos)
 {
 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
-	int flush_done;
+	int io_ret;
 
 	spin_lock(&rb->w_lock);
-	if (!pblk_rl_user_may_insert(&pblk->rl, nr_entries)) {
+	io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
+	if (io_ret) {
 		spin_unlock(&rb->w_lock);
-		return NVM_IO_REQUEUE;
+		return io_ret;
 	}
 
-	if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &flush_done)) {
+	if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
 		spin_unlock(&rb->w_lock);
 		return NVM_IO_REQUEUE;
 	}
@@ -459,7 +474,7 @@ int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
 	pblk_rl_user_in(&pblk->rl, nr_entries);
 	spin_unlock(&rb->w_lock);
 
-	return flush_done;
+	return io_ret;
 }
 
 /*