
Commit 0d1bc912 authored by Nisheeth Bhat, committed by Matthew Wilcox

Fix calculation of number of pages in a PRP List



The existing calculation underestimated the number of pages required
as it did not take into account the pointer at the end of each page.
The replacement calculation may overestimate the number of pages required
if the last page in the PRP List is entirely full.  By using ->npages
as a counter as we fill in the pages, we ensure that we don't try to
free a page that was never allocated.

Signed-off-by: Nisheeth Bhat <nisheeth.bhat@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
parent bc5fc7e4
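
To make the arithmetic in the message concrete, below is a small standalone sketch (illustrative, not driver code) comparing the two formulas. It assumes 4 KiB pages: a PRP list page holds 512 eight-byte entries, but the last entry of every non-final page is a chain pointer to the next list page, so only 511 slots per page carry PRPs.

#include <stdio.h>

#define PAGE_SIZE 4096
/* Same rounding helper the kernel defines in <linux/kernel.h>. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* A 4 MiB transfer needs 1024 PRP entries of 8 bytes each. */
	int nprps = 1024;

	/* Old formula: assumes all 512 slots of a page hold PRPs. */
	int old_npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE);

	/* New formula: the chain pointer leaves 511 usable slots. */
	int new_npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);

	/* Truly required: 511 + 511 + 2 entries, i.e. three pages. */
	printf("old=%d new=%d\n", old_npages, new_npages);	/* old=2 new=3 */
	return 0;
}

The new formula can still be one page high when the final list page is exactly full (nprps = 1023 fits in two pages, since the last page needs no chain pointer, yet the formula yields three). That overestimate is harmless at allocation time, and counting ->npages up as pages are actually allocated keeps the free path away from the unused slot.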
+7 −7
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -265,7 +265,7 @@ static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
 }
 
 struct nvme_prps {
-	int npages;
+	int npages;		/* 0 means small pool in use */
 	dma_addr_t first_dma;
 	__le64 *list[0];
 };
@@ -347,7 +347,7 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
 	int offset = offset_in_page(dma_addr);
 	__le64 *prp_list;
 	dma_addr_t prp_dma;
-	int nprps, npages, i, prp_page;
+	int nprps, npages, i;
 	struct nvme_prps *prps = NULL;
 
 	cmd->prp1 = cpu_to_le64(dma_addr);
@@ -370,20 +370,20 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
 	}
 
 	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
-	npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE);
+	npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
 	prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, gfp);
 	if (!prps) {
 		cmd->prp2 = cpu_to_le64(dma_addr);
 		*len = (*len - length) + PAGE_SIZE;
 		return prps;
 	}
-	prp_page = 0;
+
 	if (nprps <= (256 / 8)) {
 		pool = dev->prp_small_pool;
 		prps->npages = 0;
 	} else {
 		pool = dev->prp_page_pool;
-		prps->npages = npages;
+		prps->npages = 1;
 	}
 
 	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
@@ -393,7 +393,7 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
 		kfree(prps);
 		return NULL;
 	}
-	prps->list[prp_page++] = prp_list;
+	prps->list[0] = prp_list;
 	prps->first_dma = prp_dma;
 	cmd->prp2 = cpu_to_le64(prp_dma);
 	i = 0;
@@ -405,7 +405,7 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
 				*len = (*len - length);
 				return prps;
 			}
-			prps->list[prp_page++] = prp_list;
+			prps->list[prps->npages++] = prp_list;
 			prp_list[0] = old_prp_list[i - 1];
 			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
 			i = 1;
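
The teardown path is outside this hunk. As a hypothetical sketch (the walking logic below is reconstructed for illustration and is not the commit's code; only the struct fields and pool names come from the diff above), this shows why a fill-time counter is safe: the loop bound is the number of pages actually allocated, never the up-front estimate.

/* Illustrative only: free each PRP list page that was counted into
 * prps->npages during setup. npages == 0 flags the small pool, per
 * the comment this commit adds to struct nvme_prps. */
static void free_prps_sketch(struct nvme_dev *dev, struct nvme_prps *prps)
{
	const int last_prp = PAGE_SIZE / 8 - 1;	/* chain-pointer slot */
	dma_addr_t prp_dma;
	int i;

	if (!prps)
		return;

	prp_dma = prps->first_dma;
	if (prps->npages == 0) {
		dma_pool_free(dev->prp_small_pool, prps->list[0], prp_dma);
	} else {
		for (i = 0; i < prps->npages; i++) {
			/* Pages were counted as they were allocated, so
			 * this never frees a page that does not exist. */
			__le64 *prp_list = prps->list[i];
			dma_addr_t next = le64_to_cpu(prp_list[last_prp]);

			dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
			prp_dma = next;
		}
	}
	kfree(prps);
}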