
Commit 4e495a46 authored by Matias Bjørling, committed by Jens Axboe

lightnvm: pblk: expose generic disk name on pr_* msgs



The error messages in pblk do not say which pblk instance a
message came from. Update each error message to reflect the
instance it belongs to, and prefix it with pblk, so we know
the message comes from the pblk module.

Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Reviewed-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 59a8f43b
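The pblk_err/pblk_warn/pblk_info/pblk_debug helpers used throughout this diff are defined in pblk.h, which is not part of this excerpt. A minimal sketch of wrappers of this shape, assuming the "pblk %s: " prefix format and that pblk->disk->disk_name carries the generic disk name (the exact definitions in pblk.h may differ):

/* Sketch only: the authoritative definitions live in pblk.h.
 * Assumes pblk->disk->disk_name holds the instance's disk name.
 */
#define pblk_err(pblk, fmt, ...)			\
	pr_err("pblk %s: " fmt, (pblk)->disk->disk_name, ##__VA_ARGS__)
#define pblk_warn(pblk, fmt, ...)			\
	pr_warn("pblk %s: " fmt, (pblk)->disk->disk_name, ##__VA_ARGS__)
#define pblk_info(pblk, fmt, ...)			\
	pr_info("pblk %s: " fmt, (pblk)->disk->disk_name, ##__VA_ARGS__)
#define pblk_debug(pblk, fmt, ...)			\
	pr_debug("pblk %s: " fmt, (pblk)->disk->disk_name, ##__VA_ARGS__)

With a wrapper of this shape, a message such as "no free lines" would be logged as, e.g., "pblk mydisk: no free lines" (instance name hypothetical) rather than the ambiguous "pblk: no free lines".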
drivers/lightnvm/pblk-core.c +26 −25
@@ -35,7 +35,7 @@ static void pblk_line_mark_bb(struct work_struct *work)
		line = &pblk->lines[pblk_ppa_to_line(*ppa)];
		pos = pblk_ppa_to_pos(&dev->geo, *ppa);

		pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
		pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

@@ -51,12 +51,12 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
	struct ppa_addr *ppa;
	int pos = pblk_ppa_to_pos(geo, ppa_addr);

	pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
	pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
		pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	/* Not necessary to mark bad blocks on 2.0 spec. */
@@ -274,7 +274,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
		pool = &pblk->e_rq_pool;
		break;
	default:
		pr_err("pblk: trying to free unknown rqd type\n");
		pblk_err(pblk, "trying to free unknown rqd type\n");
		return;
	}

@@ -310,7 +310,7 @@ int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			pblk_err(pblk, "could not add page to bio\n");
			mempool_free(page, &pblk->page_bio_pool);
			goto err;
		}
@@ -410,7 +410,7 @@ struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
		line->state = PBLK_LINESTATE_CORRUPT;
		line->gc_group = PBLK_LINEGC_NONE;
		move_list =  &l_mg->corrupt_list;
		pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
		pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
@@ -452,7 +452,7 @@ void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pr_err("pblk: unknown read error:%d\n", rqd->error);
		pblk_err(pblk, "unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
@@ -517,7 +517,7 @@ struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
		if (!page) {
			pr_err("pblk: could not map vmalloc bio\n");
			pblk_err(pblk, "could not map vmalloc bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
@@ -525,7 +525,7 @@ struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			pblk_err(pblk, "could not add page to bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
@@ -711,7 +711,7 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
			while (test_bit(pos, line->blk_bitmap)) {
				paddr += min;
				if (pblk_boundary_paddr_checks(pblk, paddr)) {
					pr_err("pblk: corrupt emeta line:%d\n",
					pblk_err(pblk, "corrupt emeta line:%d\n",
								line->id);
					bio_put(bio);
					ret = -EINTR;
@@ -723,7 +723,7 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
			}

			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
				pr_err("pblk: corrupt emeta line:%d\n",
				pblk_err(pblk, "corrupt emeta line:%d\n",
								line->id);
				bio_put(bio);
				ret = -EINTR;
@@ -738,7 +738,7 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,

	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_rqd_dma;
	}
@@ -843,7 +843,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pr_err("pblk: smeta I/O submission failed: %d\n", ret);
		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_ppa_list;
	}
@@ -905,7 +905,7 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not sync erase line:%d,blk:%d\n",
		pblk_err(pblk, "could not sync erase line:%d,blk:%d\n",
					pblk_ppa_to_line(ppa),
					pblk_ppa_to_pos(geo, ppa));

@@ -945,7 +945,7 @@ int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)

		ret = pblk_blk_erase_sync(pblk, ppa);
		if (ret) {
			pr_err("pblk: failed to erase line %d\n", line->id);
			pblk_err(pblk, "failed to erase line %d\n", line->id);
			return ret;
		}
	} while (1);
@@ -1012,7 +1012,7 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pr_debug("pblk: line %d is bad\n", line->id);
		pblk_debug(pblk, "line %d is bad\n", line->id);

		return 0;
	}
@@ -1122,7 +1122,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
		pr_debug("pblk: line smeta I/O failed. Retry\n");
		pblk_debug(pblk, "line smeta I/O failed. Retry\n");
		return 0;
	}

@@ -1154,7 +1154,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pr_err("pblk: unexpected line %d is bad\n", line->id);
		pblk_err(pblk, "unexpected line %d is bad\n", line->id);

		return 0;
	}
@@ -1299,7 +1299,7 @@ struct pblk_line *pblk_line_get(struct pblk *pblk)

retry:
	if (list_empty(&l_mg->free_list)) {
		pr_err("pblk: no free lines\n");
		pblk_err(pblk, "no free lines\n");
		return NULL;
	}

@@ -1315,7 +1315,7 @@ struct pblk_line *pblk_line_get(struct pblk *pblk)

		list_add_tail(&line->list, &l_mg->bad_list);

		pr_debug("pblk: line %d is bad\n", line->id);
		pblk_debug(pblk, "line %d is bad\n", line->id);
		goto retry;
	}

@@ -1329,7 +1329,7 @@ struct pblk_line *pblk_line_get(struct pblk *pblk)
			list_add(&line->list, &l_mg->corrupt_list);
			goto retry;
		default:
			pr_err("pblk: failed to prepare line %d\n", line->id);
			pblk_err(pblk, "failed to prepare line %d\n", line->id);
			list_add(&line->list, &l_mg->free_list);
			l_mg->nr_free_lines++;
			return NULL;
@@ -1477,7 +1477,7 @@ static void pblk_line_close_meta_sync(struct pblk *pblk)

			ret = pblk_submit_meta_io(pblk, line);
			if (ret) {
				pr_err("pblk: sync meta line %d failed (%d)\n",
				pblk_err(pblk, "sync meta line %d failed (%d)\n",
							line->id, ret);
				return;
			}
@@ -1507,7 +1507,7 @@ void __pblk_pipeline_flush(struct pblk *pblk)

	ret = pblk_recov_pad(pblk);
	if (ret) {
		pr_err("pblk: could not close data on teardown(%d)\n", ret);
		pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
		return;
	}

@@ -1687,7 +1687,7 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not async erase line:%d,blk:%d\n",
		pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
					pblk_ppa_to_line(ppa),
					pblk_ppa_to_pos(geo, ppa));
	}
@@ -1866,7 +1866,8 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,

	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
	if (ret == -ETIME || ret == -EINTR)
		pr_err("pblk: taking lun semaphore timed out: err %d\n", -ret);
		pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
				-ret);
}

void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
drivers/lightnvm/pblk-gc.c +16 −16
@@ -90,7 +90,7 @@ static void pblk_gc_line_ws(struct work_struct *work)

	gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
	if (!gc_rq->data) {
		pr_err("pblk: could not GC line:%d (%d/%d)\n",
		pblk_err(pblk, "could not GC line:%d (%d/%d)\n",
					line->id, *line->vsc, gc_rq->nr_secs);
		goto out;
	}
@@ -98,7 +98,7 @@ static void pblk_gc_line_ws(struct work_struct *work)
	/* Read from GC victim block */
	ret = pblk_submit_read_gc(pblk, gc_rq);
	if (ret) {
		pr_err("pblk: failed GC read in line:%d (err:%d)\n",
		pblk_err(pblk, "failed GC read in line:%d (err:%d)\n",
								line->id, ret);
		goto out;
	}
@@ -146,7 +146,7 @@ static __le64 *get_lba_list_from_emeta(struct pblk *pblk,

	ret = pblk_line_read_emeta(pblk, line, emeta_buf);
	if (ret) {
		pr_err("pblk: line %d read emeta failed (%d)\n",
		pblk_err(pblk, "line %d read emeta failed (%d)\n",
				line->id, ret);
		pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
		return NULL;
@@ -160,7 +160,7 @@ static __le64 *get_lba_list_from_emeta(struct pblk *pblk,

	ret = pblk_recov_check_emeta(pblk, emeta_buf);
	if (ret) {
		pr_err("pblk: inconsistent emeta (line %d)\n",
		pblk_err(pblk, "inconsistent emeta (line %d)\n",
				line->id);
		pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
		return NULL;
@@ -201,7 +201,7 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
	} else {
		lba_list = get_lba_list_from_emeta(pblk, line);
		if (!lba_list) {
			pr_err("pblk: could not interpret emeta (line %d)\n",
			pblk_err(pblk, "could not interpret emeta (line %d)\n",
					line->id);
			goto fail_free_invalid_bitmap;
		}
@@ -213,7 +213,7 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
	spin_unlock(&line->lock);

	if (sec_left < 0) {
		pr_err("pblk: corrupted GC line (%d)\n", line->id);
		pblk_err(pblk, "corrupted GC line (%d)\n", line->id);
		goto fail_free_lba_list;
	}

@@ -289,7 +289,7 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
	kref_put(&line->ref, pblk_line_put);
	atomic_dec(&gc->read_inflight_gc);

	pr_err("pblk: Failed to GC line %d\n", line->id);
	pblk_err(pblk, "failed to GC line %d\n", line->id);
}

static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
@@ -297,7 +297,7 @@ static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line_ws *line_ws;

	pr_debug("pblk: line '%d' being reclaimed for GC\n", line->id);
	pblk_debug(pblk, "line '%d' being reclaimed for GC\n", line->id);

	line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
	if (!line_ws)
@@ -351,7 +351,7 @@ static int pblk_gc_read(struct pblk *pblk)
	pblk_gc_kick(pblk);

	if (pblk_gc_line(pblk, line))
		pr_err("pblk: failed to GC line %d\n", line->id);
		pblk_err(pblk, "failed to GC line %d\n", line->id);

	return 0;
}
@@ -523,7 +523,7 @@ static int pblk_gc_reader_ts(void *data)
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	pr_info("pblk: flushing gc pipeline, %d lines left\n",
	pblk_info(pblk, "flushing gc pipeline, %d lines left\n",
		atomic_read(&gc->pipeline_gc));
#endif

@@ -540,7 +540,7 @@ static int pblk_gc_reader_ts(void *data)
static void pblk_gc_start(struct pblk *pblk)
{
	pblk->gc.gc_active = 1;
	pr_debug("pblk: gc start\n");
	pblk_debug(pblk, "gc start\n");
}

void pblk_gc_should_start(struct pblk *pblk)
@@ -605,14 +605,14 @@ int pblk_gc_init(struct pblk *pblk)

	gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
	if (IS_ERR(gc->gc_ts)) {
		pr_err("pblk: could not allocate GC main kthread\n");
		pblk_err(pblk, "could not allocate GC main kthread\n");
		return PTR_ERR(gc->gc_ts);
	}

	gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
							"pblk-gc-writer-ts");
	if (IS_ERR(gc->gc_writer_ts)) {
		pr_err("pblk: could not allocate GC writer kthread\n");
		pblk_err(pblk, "could not allocate GC writer kthread\n");
		ret = PTR_ERR(gc->gc_writer_ts);
		goto fail_free_main_kthread;
	}
@@ -620,7 +620,7 @@ int pblk_gc_init(struct pblk *pblk)
	gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
							"pblk-gc-reader-ts");
	if (IS_ERR(gc->gc_reader_ts)) {
		pr_err("pblk: could not allocate GC reader kthread\n");
		pblk_err(pblk, "could not allocate GC reader kthread\n");
		ret = PTR_ERR(gc->gc_reader_ts);
		goto fail_free_writer_kthread;
	}
@@ -641,7 +641,7 @@ int pblk_gc_init(struct pblk *pblk)
	gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
	if (!gc->gc_line_reader_wq) {
		pr_err("pblk: could not allocate GC line reader workqueue\n");
		pblk_err(pblk, "could not allocate GC line reader workqueue\n");
		ret = -ENOMEM;
		goto fail_free_reader_kthread;
	}
@@ -650,7 +650,7 @@ int pblk_gc_init(struct pblk *pblk)
	gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
					WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!gc->gc_reader_wq) {
		pr_err("pblk: could not allocate GC reader workqueue\n");
		pblk_err(pblk, "could not allocate GC reader workqueue\n");
		ret = -ENOMEM;
		goto fail_free_reader_line_wq;
	}
drivers/lightnvm/pblk-init.c +39 −37
@@ -117,13 +117,13 @@ static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
	} else {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pr_err("pblk: could not recover l2p table\n");
			pblk_err(pblk, "could not recover l2p table\n");
			return -EFAULT;
		}
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
	pblk_info(pblk, "init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	/* Free full lines directly as GC has not been started yet */
@@ -166,7 +166,7 @@ static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pr_err("pblk: write buffer error on tear down\n");
		pblk_err(pblk, "write buffer error on tear down\n");

	pblk_rb_data_free(&pblk->rwb);
	vfree(pblk_rb_entries_ref(&pblk->rwb));
@@ -203,7 +203,8 @@ static int pblk_rwb_init(struct pblk *pblk)
/* Minimum pages needed within a lun */
#define ADDR_POOL_SIZE 64

-static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst)
+static int pblk_set_addrf_12(struct pblk *pblk, struct nvm_geo *geo,
+			     struct nvm_addrf_12 *dst)
{
	struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
	int power_len;
@@ -211,14 +212,14 @@ static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst)
	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->num_ch);
	if (1 << power_len != geo->num_ch) {
		pr_err("pblk: supports only power-of-two channel config.\n");
		pblk_err(pblk, "supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	dst->ch_len = power_len;

	power_len = get_count_order(geo->num_lun);
	if (1 << power_len != geo->num_lun) {
		pr_err("pblk: supports only power-of-two LUN config.\n");
		pblk_err(pblk, "supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	dst->lun_len = power_len;
@@ -285,18 +286,19 @@ static int pblk_set_addrf(struct pblk *pblk)
	case NVM_OCSSD_SPEC_12:
		div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
		if (mod) {
			pr_err("pblk: bad configuration of sectors/pages\n");
			pblk_err(pblk, "bad configuration of sectors/pages\n");
			return -EINVAL;
		}

-		pblk->addrf_len = pblk_set_addrf_12(geo, (void *)&pblk->addrf);
+		pblk->addrf_len = pblk_set_addrf_12(pblk, geo,
+							(void *)&pblk->addrf);
		break;
	case NVM_OCSSD_SPEC_20:
		pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf,
							&pblk->uaddrf);
		break;
	default:
		pr_err("pblk: OCSSD revision not supported (%d)\n",
		pblk_err(pblk, "OCSSD revision not supported (%d)\n",
								geo->version);
		return -EINVAL;
	}
@@ -375,7 +377,7 @@ static int pblk_core_init(struct pblk *pblk)
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
		pr_err("pblk: vector list too big(%u > %u)\n",
		pblk_err(pblk, "vector list too big(%u > %u)\n",
				pblk->max_write_pgs, PBLK_MAX_REQ_ADDRS);
		return -EINVAL;
	}
@@ -608,7 +610,7 @@ static int pblk_luns_init(struct pblk *pblk)

	/* TODO: Implement unbalanced LUN support */
	if (geo->num_lun < 0) {
		pr_err("pblk: unbalanced LUN config.\n");
		pblk_err(pblk, "unbalanced LUN config.\n");
		return -EINVAL;
	}

@@ -1027,7 +1029,7 @@ static int pblk_line_meta_init(struct pblk *pblk)
					lm->emeta_sec[0], geo->clba);

	if (lm->min_blk_line > lm->blk_per_line) {
		pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
		pblk_err(pblk, "config. not supported. Min. LUN in line:%d\n",
							lm->blk_per_line);
		return -EINVAL;
	}
@@ -1079,7 +1081,7 @@ static int pblk_lines_init(struct pblk *pblk)
	}

	if (!nr_free_chks) {
		pr_err("pblk: too many bad blocks prevent for sane instance\n");
		pblk_err(pblk, "too many bad blocks prevent for sane instance\n");
		return -EINTR;
	}

@@ -1109,7 +1111,7 @@ static int pblk_writer_init(struct pblk *pblk)
		int err = PTR_ERR(pblk->writer_ts);

		if (err != -EINTR)
			pr_err("pblk: could not allocate writer kthread (%d)\n",
			pblk_err(pblk, "could not allocate writer kthread (%d)\n",
					err);
		return err;
	}
@@ -1155,7 +1157,7 @@ static void pblk_tear_down(struct pblk *pblk, bool graceful)
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_rl_free(&pblk->rl);

	pr_debug("pblk: consistent tear down (graceful:%d)\n", graceful);
	pblk_debug(pblk, "consistent tear down (graceful:%d)\n", graceful);
}

static void pblk_exit(void *private, bool graceful)
@@ -1167,7 +1169,7 @@ static void pblk_exit(void *private, bool graceful)
	pblk_tear_down(pblk, graceful);

#ifdef CONFIG_NVM_PBLK_DEBUG
	pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
	pblk_info(pblk, "exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	pblk_free(pblk);
@@ -1190,29 +1192,30 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
	struct pblk *pblk;
	int ret;

-	/* pblk supports 1.2 and 2.0 versions */
+	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
+	if (!pblk)
+		return ERR_PTR(-ENOMEM);
+
+	pblk->dev = dev;
+	pblk->disk = tdisk;
+	pblk->state = PBLK_STATE_RUNNING;
+	pblk->gc.gc_enabled = 0;
+
	if (!(geo->version == NVM_OCSSD_SPEC_12 ||
					geo->version == NVM_OCSSD_SPEC_20)) {
-		pr_err("pblk: OCSSD version not supported (%u)\n",
+		pblk_err(pblk, "OCSSD version not supported (%u)\n",
							geo->version);
+		kfree(pblk);
		return ERR_PTR(-EINVAL);
	}

	if (geo->version == NVM_OCSSD_SPEC_12 && geo->dom & NVM_RSP_L2P) {
-		pr_err("pblk: host-side L2P table not supported. (%x)\n",
+		pblk_err(pblk, "host-side L2P table not supported. (%x)\n",
							geo->dom);
+		kfree(pblk);
		return ERR_PTR(-EINVAL);
	}

-	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
-	if (!pblk)
-		return ERR_PTR(-ENOMEM);
-
-	pblk->dev = dev;
-	pblk->disk = tdisk;
-	pblk->state = PBLK_STATE_RUNNING;
-	pblk->gc.gc_enabled = 0;
-
	spin_lock_init(&pblk->resubmit_lock);
	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);
@@ -1242,38 +1245,38 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,

	ret = pblk_core_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize core\n");
		pblk_err(pblk, "could not initialize core\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize lines\n");
		pblk_err(pblk, "could not initialize lines\n");
		goto fail_free_core;
	}

	ret = pblk_rwb_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize write buffer\n");
		pblk_err(pblk, "could not initialize write buffer\n");
		goto fail_free_lines;
	}

	ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY);
	if (ret) {
		pr_err("pblk: could not initialize maps\n");
		pblk_err(pblk, "could not initialize maps\n");
		goto fail_free_rwb;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		if (ret != -EINTR)
			pr_err("pblk: could not initialize write thread\n");
			pblk_err(pblk, "could not initialize write thread\n");
		goto fail_free_l2p;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize gc\n");
		pblk_err(pblk, "could not initialize gc\n");
		goto fail_stop_writer;
	}

@@ -1288,8 +1291,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);

	pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			tdisk->disk_name,
	pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			geo->all_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->rl.nr_secs,
			pblk->rwb.nr_entries);
drivers/lightnvm/pblk-rb.c +4 −4
@@ -547,7 +547,7 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,

		page = virt_to_page(entry->data);
		if (!page) {
			pr_err("pblk: could not allocate write bio page\n");
			pblk_err(pblk, "could not allocate write bio page\n");
			flags &= ~PBLK_WRITTEN_DATA;
			flags |= PBLK_SUBMITTED_ENTRY;
			/* Release flags on context. Protect from writes */
@@ -557,7 +557,7 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,

		if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
								rb->seg_size) {
			pr_err("pblk: could not add page to write bio\n");
			pblk_err(pblk, "could not add page to write bio\n");
			flags &= ~PBLK_WRITTEN_DATA;
			flags |= PBLK_SUBMITTED_ENTRY;
			/* Release flags on context. Protect from writes */
@@ -576,14 +576,14 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,

	if (pad) {
		if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
			pr_err("pblk: could not pad page in write bio\n");
			pblk_err(pblk, "could not pad page in write bio\n");
			return NVM_IO_ERR;
		}

		if (pad < pblk->min_write_pgs)
			atomic64_inc(&pblk->pad_dist[pad - 1]);
		else
			pr_warn("pblk: padding more than min. sectors\n");
			pblk_warn(pblk, "padding more than min. sectors\n");

		atomic64_add(pad, &pblk->pad_wa);
	}
drivers/lightnvm/pblk-read.c +13 −12
@@ -121,9 +121,9 @@ static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
			struct ppa_addr *p;

			p = (nr_lbas == 1) ? &rqd->ppa_list[i] : &rqd->ppa_addr;
			print_ppa(&pblk->dev->geo, p, "seq", i);
			print_ppa(pblk, p, "seq", i);
#endif
			pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
							lba, (u64)blba + i);
			WARN_ON(1);
		}
@@ -154,9 +154,9 @@ static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
			int nr_ppas = rqd->nr_ppas;

			p = (nr_ppas == 1) ? &rqd->ppa_list[j] : &rqd->ppa_addr;
			print_ppa(&pblk->dev->geo, p, "seq", j);
			print_ppa(pblk, p, "seq", j);
#endif
			pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
								lba, meta_lba);
			WARN_ON(1);
		}
@@ -256,7 +256,7 @@ static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
		goto fail_add_pages;

	if (nr_holes != new_bio->bi_vcnt) {
		pr_err("pblk: malformed bio\n");
		pblk_err(pblk, "malformed bio\n");
		goto fail;
	}

@@ -279,7 +279,7 @@ static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		bio_put(rqd->bio);
		pr_err("pblk: sync read IO submission failed\n");
		pblk_err(pblk, "sync read IO submission failed\n");
		goto fail;
	}

@@ -346,7 +346,7 @@ static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
	/* Free allocated pages in new bio */
	pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
fail_add_pages:
	pr_err("pblk: failed to perform partial read\n");
	pblk_err(pblk, "failed to perform partial read\n");
	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_ERR;
}
@@ -436,7 +436,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list) {
		pr_err("pblk: not able to allocate ppa list\n");
		pblk_err(pblk, "not able to allocate ppa list\n");
		goto fail_rqd_free;
	}

@@ -462,14 +462,14 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
		if (!int_bio) {
			pr_err("pblk: could not clone read bio\n");
			pblk_err(pblk, "could not clone read bio\n");
			goto fail_end_io;
		}

		rqd->bio = int_bio;

		if (pblk_submit_io(pblk, rqd)) {
			pr_err("pblk: read IO submission failed\n");
			pblk_err(pblk, "read IO submission failed\n");
			ret = NVM_IO_ERR;
			goto fail_end_io;
		}
@@ -595,7 +595,8 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
	bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
		pblk_err(pblk, "could not allocate GC bio (%lu)\n",
				PTR_ERR(bio));
		goto err_free_dma;
	}

@@ -609,7 +610,7 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)

	if (pblk_submit_io_sync(pblk, &rqd)) {
		ret = -EIO;
		pr_err("pblk: GC read request failed\n");
		pblk_err(pblk, "GC read request failed\n");
		goto err_free_bio;
	}
