Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 73d28921 authored by Mike Marciniszyn's avatar Mike Marciniszyn Committed by Greg Kroah-Hartman
Browse files

IB/qib: Protect from buffer overflow in struct qib_user_sdma_pkt fields

commit d39bf40e55e666b5905fdbd46a0dced030ce87be upstream.

Overflowing either addrlimit or bytes_togo can allow userspace to trigger
a buffer overflow of kernel memory. Check for overflows in all the places
doing math on user controlled buffers.

Fixes: f931551b ("IB/qib: Add new qib driver for QLogic PCIe InfiniBand adapters")
Link: https://lore.kernel.org/r/20211012175519.7298.77738.stgit@awfm-01.cornelisnetworks.com


Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent cde048c5
Loading
Loading
Loading
Loading
+23 −10
Original line number Diff line number Diff line
@@ -606,7 +606,7 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
/*
 * How many pages in this iovec element?
 */
static int qib_user_sdma_num_pages(const struct iovec *iov)
static size_t qib_user_sdma_num_pages(const struct iovec *iov)
{
	const unsigned long addr  = (unsigned long) iov->iov_base;
	const unsigned long  len  = iov->iov_len;
@@ -662,7 +662,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
				   struct qib_user_sdma_queue *pq,
				   struct qib_user_sdma_pkt *pkt,
				   unsigned long addr, int tlen, int npages)
				   unsigned long addr, int tlen, size_t npages)
{
	struct page *pages[8];
	int i, j;
@@ -726,7 +726,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
	unsigned long idx;

	for (idx = 0; idx < niov; idx++) {
		const int npages = qib_user_sdma_num_pages(iov + idx);
		const size_t npages = qib_user_sdma_num_pages(iov + idx);
		const unsigned long addr = (unsigned long) iov[idx].iov_base;

		ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
@@ -828,8 +828,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
		unsigned pktnw;
		unsigned pktnwc;
		int nfrags = 0;
		int npages = 0;
		int bytes_togo = 0;
		size_t npages = 0;
		size_t bytes_togo = 0;
		int tiddma = 0;
		int cfur;

@@ -889,7 +889,11 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,

			npages += qib_user_sdma_num_pages(&iov[idx]);

			bytes_togo += slen;
			if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
			    bytes_togo > type_max(typeof(pkt->bytes_togo))) {
				ret = -EINVAL;
				goto free_pbc;
			}
			pktnwc += slen >> 2;
			idx++;
			nfrags++;
@@ -908,8 +912,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
		}

		if (frag_size) {
			int tidsmsize, n;
			size_t pktsize;
			size_t tidsmsize, n, pktsize, sz, addrlimit;

			n = npages*((2*PAGE_SIZE/frag_size)+1);
			pktsize = struct_size(pkt, addr, n);
@@ -927,14 +930,24 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
			else
				tidsmsize = 0;

			pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
			if (check_add_overflow(pktsize, tidsmsize, &sz)) {
				ret = -EINVAL;
				goto free_pbc;
			}
			pkt = kmalloc(sz, GFP_KERNEL);
			if (!pkt) {
				ret = -ENOMEM;
				goto free_pbc;
			}
			pkt->largepkt = 1;
			pkt->frag_size = frag_size;
			pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
			if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
					       &addrlimit) ||
			    addrlimit > type_max(typeof(pkt->addrlimit))) {
				ret = -EINVAL;
				goto free_pbc;
			}
			pkt->addrlimit = addrlimit;

			if (tiddma) {
				char *tidsm = (char *)pkt + pktsize;