Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2962c0fd authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge changes I27ae3e16,I4f151806,I04ebe7fc,I31072f65,I0f986787,If16fc639 into msm-next

* changes:
  soc: qcom: smem: Support getting cached entries
  soc: qcom: smem: Rename "uncached" accessors
  ARM: dts: msm: Add smem configuration on sdm855
  defconfig: arm64: sdm855: Enable SMEM
  ARM: dts: msm: Add remote spinlock configuration for sdm855
  defconfig: arm64: sdm855: Enable hardware spinlocks
parents 5584d359 b4dfd872
Loading
Loading
Loading
Loading
+22 −0
Original line number | Diff line number | Diff line
@@ -333,6 +333,11 @@
			size = <0x0 0x2000000>;
			linux,cma-default;
		};

		smem_region: smem@86000000 {
			no-map;
			reg = <0x0 0x86000000 0x0 0x200000>;
		};
	};
};

@@ -561,6 +566,23 @@
			snps,hird-threshold = /bits/ 8 <0x10>;
		};
	};

	tcsr_mutex_block: syscon@1f40000 {
		compatible = "syscon";
		reg = <0x1f40000 0x20000>;
	};

	tcsr_mutex: hwlock@1f40000 {
		compatible = "qcom,tcsr-mutex";
		syscon = <&tcsr_mutex_block 0 0x1000>;
		#hwlock-cells = <1>;
	};

	smem: qcom,smem@8600000 {
		compatible = "qcom,smem";
		memory-region = <&smem_region>;
		hwlocks = <&tcsr_mutex 3>;
	};
};

&emac_gdsc {
+3 −0
Original line number | Diff line number | Diff line
@@ -301,7 +301,10 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_SMEM=y
CONFIG_QCOM_SCM=y
CONFIG_IIO=y
CONFIG_PWM=y
+3 −0
Original line number | Diff line number | Diff line
@@ -307,7 +307,10 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_SMEM=y
CONFIG_QCOM_SCM=y
CONFIG_IIO=y
CONFIG_PWM=y
+73 −20
Original line number | Diff line number | Diff line
@@ -52,7 +52,8 @@
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets.
 * is hence the region between the cached and non-cached offsets. The header of
 * cached items comes after the data.
 *
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock must
@@ -140,6 +141,7 @@ struct smem_header {
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @cacheline:	alignment for "cached" entries
 * @reserved:	reserved entries for later use
 */
struct smem_ptable_entry {
@@ -148,7 +150,8 @@ struct smem_ptable_entry {
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 reserved[8];
	__le32 cacheline;
	__le32 reserved[7];
};

/**
@@ -230,6 +233,7 @@ struct smem_region {
 * @hwlock:	reference to a hwspinlock
 * @partitions:	list of pointers to partitions affecting the current
 *		processor/host
 * @cacheline:	list of cacheline sizes for each host
 * @num_regions: number of @regions
 * @regions:	list of the memory regions defining the shared memory
 */
@@ -239,20 +243,29 @@ struct qcom_smem {
	struct hwspinlock *hwlock;

	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
	size_t cacheline[SMEM_HOST_COUNT];

	unsigned num_regions;
	struct smem_region regions[0];
};

static struct smem_private_entry *
phdr_to_last_private_entry(struct smem_partition_header *phdr)
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr,
					size_t cacheline)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*phdr), cacheline);
}

static void *phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

@@ -260,7 +273,7 @@ static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
}

static struct smem_private_entry *
phdr_to_first_private_entry(struct smem_partition_header *phdr)
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

@@ -268,7 +281,7 @@ phdr_to_first_private_entry(struct smem_partition_header *phdr)
}

static struct smem_private_entry *
private_entry_next(struct smem_private_entry *e)
uncached_entry_next(struct smem_private_entry *e)
{
	void *p = e;

@@ -276,13 +289,28 @@ private_entry_next(struct smem_private_entry *e)
	       le32_to_cpu(e->size);
}

static void *entry_to_item(struct smem_private_entry *e)
static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
	void *p = e;

	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p - le32_to_cpu(e->size);
}

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

@@ -300,9 +328,9 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
	void *cached;

	phdr = smem->partitions[host];
	hdr = phdr_to_first_private_entry(phdr);
	end = phdr_to_last_private_entry(phdr);
	cached = phdr_to_first_cached_entry(phdr);
	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY) {
@@ -315,7 +343,7 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = private_entry_next(hdr);
		hdr = uncached_entry_next(hdr);
	}

	/* Check that we don't grow into the cached region */
@@ -458,31 +486,55 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
{
	struct smem_partition_header *phdr;
	struct smem_private_entry *e, *end;
	size_t cacheline;

	phdr = smem->partitions[host];
	e = phdr_to_first_private_entry(phdr);
	end = phdr_to_last_private_entry(phdr);
	cacheline = smem->cacheline[host];

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY) {
			dev_err(smem->dev,
				"Found invalid canary in host %d partition\n",
				host);
			return ERR_PTR(-EINVAL);
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return uncached_entry_to_item(e);
		}

		e = uncached_entry_next(e);
	}

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, cacheline);
	end = phdr_to_last_cached_entry(phdr);

	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return entry_to_item(e);
			return cached_entry_to_item(e);
		}

		e = private_entry_next(e);
		e = cached_entry_next(e, cacheline);
	}

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in host %d partition\n", host);

	return ERR_PTR(-EINVAL);
}

/**
@@ -659,6 +711,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
		}

		smem->partitions[remote_host] = header;
		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
	}

	return 0;