Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b5ebc8ec authored by Dan Williams
Browse files

libnvdimm, pmem: fix kmap_atomic() leak in error path



When we encounter a bad block we need to kunmap_atomic() before
returning.

Cc: <stable@vger.kernel.org>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent d4f32367
Loading
Loading
Loading
Loading
+7 −4
Original line number Original line Diff line number Diff line
@@ -66,22 +66,25 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
			sector_t sector)
{
{
	int rc = 0;
	void *mem = kmap_atomic(page);
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;


	if (rw == READ) {
	if (rw == READ) {
		if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
			return -EIO;
			rc = -EIO;
		else {
			memcpy_from_pmem(mem + off, pmem_addr, len);
			memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
			flush_dcache_page(page);
		}
	} else {
	} else {
		flush_dcache_page(page);
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
		memcpy_to_pmem(pmem_addr, mem + off, len);
	}
	}


	kunmap_atomic(mem);
	kunmap_atomic(mem);
	return 0;
	return rc;
}
}


static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)