Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 58e40179 authored by Sarannya S, committed by Pranav Mahesh Phansalkar
Browse files

soc: qcom: smem: Add boundary checks for partitions



Add condition checks to make sure that the end address
of a private entry does not go outside its partition.

Change-Id: I88b3c69d86d90905b214c13a8c632b134b487a49
Signed-off-by: Sarannya S <quic_sarannya@quicinc.com>
Signed-off-by: Pranav Mahesh Phansalkar <quic_pphansal@quicinc.com>
parent 2cf7f335
Loading
Loading
Loading
Loading
+71 −34
Original line number Original line Diff line number Diff line
@@ -2,6 +2,7 @@
/*
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, 2019-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2012-2013, 2019-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */
 */


#include <linux/hwspinlock.h>
#include <linux/hwspinlock.h>
@@ -86,6 +87,17 @@
/* Max number of processors/hosts in a system */
/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		14
#define SMEM_HOST_COUNT		14


/* Entry range check
 * ptr >= start : Checks if ptr is greater than the start of access region
 * ptr + size >= ptr: Check for integer overflow (On 32bit system where ptr
 * and size are 32bits, ptr + size can wrap around to be a small integer)
 * ptr + size <= end: Checks if ptr+size is less than the end of access region
 */
#define IN_PARTITION_RANGE(ptr, size, start, end)		\
	(((void *)(ptr) >= (void *)(start)) &&			\
	 (((void *)(ptr) + (size)) >= (void *)(ptr)) &&	\
	 (((void *)(ptr) + (size)) <= (void *)(end)))

/**
/**
  * struct smem_proc_comm - proc_comm communication struct (legacy)
  * struct smem_proc_comm - proc_comm communication struct (legacy)
  * @command:	current command to be executed
  * @command:	current command to be executed
@@ -353,6 +365,7 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   size_t size)
				   size_t size)
{
{
	struct smem_private_entry *hdr, *end;
	struct smem_private_entry *hdr, *end;
	struct smem_private_entry *next_hdr;
	struct smem_partition_header *phdr;
	struct smem_partition_header *phdr;
	size_t alloc_size;
	size_t alloc_size;
	void *cached;
	void *cached;
@@ -365,18 +378,25 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
	end = phdr_to_last_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);


	if (WARN_ON((void *)end > p_end || (void *)cached > p_end))
	if (WARN_ON(!IN_PARTITION_RANGE(end, 0, phdr, cached) ||
						cached > p_end))
		return -EINVAL;
		return -EINVAL;


	while (hdr < end) {
	while ((hdr < end) && ((hdr + 1) < end)) {
		if (hdr->canary != SMEM_PRIVATE_CANARY)
		if (hdr->canary != SMEM_PRIVATE_CANARY)
			goto bad_canary;
			goto bad_canary;
		if (le16_to_cpu(hdr->item) == item)
		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;
			return -EEXIST;


		hdr = uncached_entry_next(hdr);
		next_hdr = uncached_entry_next(hdr);

		if (WARN_ON(next_hdr <= hdr))
			return -EINVAL;

		hdr = next_hdr;
	}
	}
	if (WARN_ON((void *)hdr > p_end))

	if (WARN_ON((void *)hdr > (void *)end))
		return -EINVAL;
		return -EINVAL;


	/* Check that we don't grow into the cached region */
	/* Check that we don't grow into the cached region */
@@ -534,9 +554,11 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
				   unsigned item,
				   unsigned item,
				   size_t *size)
				   size_t *size)
{
{
	struct smem_private_entry *e, *end;
	struct smem_private_entry *e, *uncached_end, *cached_end;
	struct smem_private_entry *next_e;
	struct smem_partition_header *phdr;
	struct smem_partition_header *phdr;
	void *item_ptr, *p_end;
	void *item_ptr, *p_end;
	size_t entry_size = 0;
	u32 partition_size;
	u32 partition_size;
	size_t cacheline;
	size_t cacheline;
	u32 padding_data;
	u32 padding_data;
@@ -548,72 +570,87 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
	cacheline = le32_to_cpu(entry->cacheline);
	cacheline = le32_to_cpu(entry->cacheline);


	e = phdr_to_first_uncached_entry(phdr);
	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	uncached_end = phdr_to_last_uncached_entry(phdr);
	cached_end = phdr_to_last_cached_entry(phdr);


	if (WARN_ON((void *)end > p_end))
	if (WARN_ON(!IN_PARTITION_RANGE(uncached_end, 0, phdr, cached_end)
					|| (void *)cached_end > p_end))
		return ERR_PTR(-EINVAL);
		return ERR_PTR(-EINVAL);


	while (e < end) {
	while ((e < uncached_end) && ((e + 1) < uncached_end)) {
		if (e->canary != SMEM_PRIVATE_CANARY)
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;
			goto invalid_canary;


		if (le16_to_cpu(e->item) == item) {
		if (le16_to_cpu(e->item) == item) {
			if (size != NULL) {
			e_size = le32_to_cpu(e->size);
			e_size = le32_to_cpu(e->size);
			padding_data = le16_to_cpu(e->padding_data);
			padding_data = le16_to_cpu(e->padding_data);


				if (e_size < partition_size
			if (e_size < partition_size && padding_data < e_size)
				    && padding_data < e_size)
				entry_size = e_size - padding_data;
					*size = e_size - padding_data;
			else
			else
				return ERR_PTR(-EINVAL);
				return ERR_PTR(-EINVAL);
			}


			item_ptr =  uncached_entry_to_item(e);
			item_ptr =  uncached_entry_to_item(e);
			if (WARN_ON(item_ptr > p_end))

			if (WARN_ON(!IN_PARTITION_RANGE(item_ptr, entry_size, e, uncached_end)))
				return ERR_PTR(-EINVAL);
				return ERR_PTR(-EINVAL);


			if (size != NULL)
				*size = entry_size;

			return item_ptr;
			return item_ptr;
		}
		}


		e = uncached_entry_next(e);
		next_e = uncached_entry_next(e);
		if (WARN_ON(next_e <= e))
			return ERR_PTR(-EINVAL);

		e = next_e;
	}
	}
	if (WARN_ON((void *)e > p_end))
	if (WARN_ON((void *)e > (void *)uncached_end))
		return ERR_PTR(-EINVAL);
		return ERR_PTR(-EINVAL);


	/* Item was not found in the uncached list, search the cached list */
	/* Item was not found in the uncached list, search the cached list */


	if (cached_end == p_end)
		return ERR_PTR(-ENOENT);

	e = phdr_to_first_cached_entry(phdr, cacheline);
	e = phdr_to_first_cached_entry(phdr, cacheline);
	end = phdr_to_last_cached_entry(phdr);


	if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
	if (WARN_ON(!IN_PARTITION_RANGE(cached_end, 0, uncached_end, p_end) ||
			!IN_PARTITION_RANGE(e, sizeof(*e), cached_end, p_end)))
		return ERR_PTR(-EINVAL);
		return ERR_PTR(-EINVAL);


	while (e > end) {
	while (e > cached_end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;
			goto invalid_canary;


		if (le16_to_cpu(e->item) == item) {
		if (le16_to_cpu(e->item) == item) {
			if (size != NULL) {
			e_size = le32_to_cpu(e->size);
			e_size = le32_to_cpu(e->size);
			padding_data = le16_to_cpu(e->padding_data);
			padding_data = le16_to_cpu(e->padding_data);


				if (e_size < partition_size
			if (e_size < partition_size && padding_data < e_size)
				    && padding_data < e_size)
				entry_size  = e_size - padding_data;
					*size = e_size - padding_data;
			else
			else
				return ERR_PTR(-EINVAL);
				return ERR_PTR(-EINVAL);
			}


			item_ptr =  cached_entry_to_item(e);
			item_ptr =  cached_entry_to_item(e);
			if (WARN_ON(item_ptr < (void *)phdr))
			if (WARN_ON(!IN_PARTITION_RANGE(item_ptr, entry_size, cached_end, e)))
				return ERR_PTR(-EINVAL);
				return ERR_PTR(-EINVAL);


			if (size != NULL)
				*size = entry_size;

			return item_ptr;
			return item_ptr;
		}
		}


		e = cached_entry_next(e, cacheline);
		next_e = cached_entry_next(e, cacheline);
		if (WARN_ON(next_e >= e))
			return ERR_PTR(-EINVAL);

		e = next_e;
	}
	}

	if (WARN_ON((void *)e < (void *)phdr))
	if (WARN_ON((void *)e < (void *)phdr))
		return ERR_PTR(-EINVAL);
		return ERR_PTR(-EINVAL);