
Commit 5220e65f authored by Neeraj Soni, committed by Blagovest Kolenichev

mmc: host: Add variant ops for cqhci crypto



Add the QTI implementation of the block keyslot manager ops and the
crypto variant ops used for crypto support in CQHCI.

Change-Id: I9b64f85ca97c269a6ecd6fde2bb693745d4c43d4
Signed-off-by: Neeraj Soni <neersoni@codeaurora.org>
parent 2543833e
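For context, a minimal sketch of how a CQE-capable host driver opts into
these variant ops; it simply mirrors the sdhci-msm.c hunk at the end of
this diff (cq_host, dma64 and ret are that driver's existing locals):

	/* Select the QTI crypto vops before the CQE host is initialised. */
#ifdef CONFIG_MMC_CQHCI_CRYPTO_QTI
	cqhci_crypto_qti_set_vops(cq_host);
#endif
	ret = cqhci_init(cq_host, host->mmc, dma64);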
drivers/mmc/host/Kconfig +9 −0
@@ -946,6 +946,7 @@ config MMC_SDHCI_OMAP
	  If you have a controller with this interface, say Y or M here.

	  If unsure, say N.

config MMC_CQHCI_CRYPTO
	bool "CQHCI Crypto Engine Support"
	depends on MMC_CQHCI && BLK_INLINE_ENCRYPTION
@@ -954,3 +955,11 @@ config MMC_CQHCI_CRYPTO
	  Enabling this makes it possible for the kernel to use the crypto
	  capabilities of the CQHCI device (if present) to perform crypto
	  operations on data being transferred to/from the device.

config MMC_CQHCI_CRYPTO_QTI
	bool "Vendor specific CQHCI Crypto Engine Support"
	depends on MMC_CQHCI_CRYPTO
	help
	  Enable vendor-specific crypto engine support in CQHCI.
	  Enabling this allows the kernel to use the CQHCI crypto operations
	  defined and implemented by QTI.
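Taken together with the symbols it depends on, a defconfig fragment that
enables the vendor ops would look roughly like this (a sketch derived from
the depends lines above, not part of the commit):

CONFIG_MMC_CQHCI=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_MMC_CQHCI_CRYPTO=y
CONFIG_MMC_CQHCI_CRYPTO_QTI=y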
drivers/mmc/host/Makefile +1 −0
@@ -93,6 +93,7 @@ obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
obj-$(CONFIG_MMC_SDHCI_OMAP)		+= sdhci-omap.o
obj-$(CONFIG_MMC_CQHCI)			+= cqhci.o
obj-$(CONFIG_MMC_CQHCI_CRYPTO)		+= cqhci-crypto.o
obj-$(CONFIG_MMC_CQHCI_CRYPTO_QTI)	+= cqhci-crypto-qti.o

ifeq ($(CONFIG_CB710_DEBUG),y)
	CFLAGS-cb710-mmc	+= -DDEBUG
drivers/mmc/host/cqhci-crypto-qti.c +300 −0
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <crypto/algapi.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
#include "sdhci-msm.h"
#include "cqhci-crypto-qti.h"
#include <linux/crypto-qti-common.h>

#define RAW_SECRET_SIZE 32
#define MINIMUM_DUN_SIZE 512
#define MAXIMUM_DUN_SIZE 65536

static struct cqhci_host_crypto_variant_ops cqhci_crypto_qti_variant_ops = {
	.host_init_crypto = cqhci_crypto_qti_init_crypto,
	.enable = cqhci_crypto_qti_enable,
	.disable = cqhci_crypto_qti_disable,
	.resume = cqhci_crypto_qti_resume,
	.debug = cqhci_crypto_qti_debug,
};

static bool ice_cap_idx_valid(struct cqhci_host *host,
					unsigned int cap_idx)
{
	return cap_idx < host->crypto_capabilities.num_crypto_cap;
}

/*
 * Encode a data unit size in the SDUS bitmask form used by the crypto
 * capability registers: the result is data_unit_size / 512, i.e. BIT(n)
 * for a (512 << n)-byte data unit (e.g. 4096 bytes -> 0x08), or 0 if the
 * size is out of range or not a power of two.
 */
static uint8_t get_data_unit_size_mask(unsigned int data_unit_size)
{
	if (data_unit_size < MINIMUM_DUN_SIZE ||
		data_unit_size > MAXIMUM_DUN_SIZE ||
	    !is_power_of_2(data_unit_size))
		return 0;

	return data_unit_size / MINIMUM_DUN_SIZE;
}


void cqhci_crypto_qti_enable(struct cqhci_host *host)
{
	int err = 0;

	if (!cqhci_host_is_crypto_supported(host))
		return;

	host->caps |= CQHCI_CAP_CRYPTO_SUPPORT;

	err = crypto_qti_enable(host->crypto_vops->priv);
	if (err) {
		pr_err("%s: Error enabling crypto, err %d\n",
				__func__, err);
		cqhci_crypto_qti_disable(host);
	}
}

void cqhci_crypto_qti_disable(struct cqhci_host *host)
{
	cqhci_crypto_disable_spec(host);
	crypto_qti_disable(host->crypto_vops->priv);
}

static int cqhci_crypto_qti_keyslot_program(struct keyslot_manager *ksm,
					    const struct blk_crypto_key *key,
					    unsigned int slot)
{
	struct cqhci_host *host = keyslot_manager_private(ksm);
	int err = 0;
	u8 data_unit_mask;
	int crypto_alg_id;

	crypto_alg_id = cqhci_crypto_cap_find(host, key->crypto_mode,
					       key->data_unit_size);

	if (!cqhci_is_crypto_enabled(host) ||
	    !cqhci_keyslot_valid(host, slot) ||
	    !ice_cap_idx_valid(host, crypto_alg_id)) {
		return -EINVAL;
	}

	data_unit_mask = get_data_unit_size_mask(key->data_unit_size);

	if (!(data_unit_mask &
	      host->crypto_cap_array[crypto_alg_id].sdus_mask)) {
		return -EINVAL;
	}

	err = crypto_qti_keyslot_program(host->crypto_vops->priv, key,
					 slot, data_unit_mask, crypto_alg_id);
	if (err)
		pr_err("%s: failed with error %d\n", __func__, err);

	return err;
}

static int cqhci_crypto_qti_keyslot_evict(struct keyslot_manager *ksm,
					  const struct blk_crypto_key *key,
					  unsigned int slot)
{
	int err = 0;
	struct cqhci_host *host = keyslot_manager_private(ksm);

	if (!cqhci_is_crypto_enabled(host) ||
	    !cqhci_keyslot_valid(host, slot))
		return -EINVAL;

	err = crypto_qti_keyslot_evict(host->crypto_vops->priv, slot);
	if (err)
		pr_err("%s: failed with error %d\n", __func__, err);

	return err;
}

static int cqhci_crypto_qti_derive_raw_secret(struct keyslot_manager *ksm,
		const u8 *wrapped_key, unsigned int wrapped_key_size,
		u8 *secret, unsigned int secret_size)
{
	int err = 0;

	if (wrapped_key_size <= RAW_SECRET_SIZE) {
		pr_err("%s: Invalid wrapped_key_size: %u\n", __func__,
			wrapped_key_size);
		err = -EINVAL;
		return err;
	}
	if (secret_size != RAW_SECRET_SIZE) {
		pr_err("%s: Invalid secret size: %u\n", __func__, secret_size);
		err = -EINVAL;
		return err;
	}
	memcpy(secret, wrapped_key, secret_size);
	return 0;
}

static const struct keyslot_mgmt_ll_ops cqhci_crypto_qti_ksm_ops = {
	.keyslot_program	= cqhci_crypto_qti_keyslot_program,
	.keyslot_evict		= cqhci_crypto_qti_keyslot_evict,
	.derive_raw_secret	= cqhci_crypto_qti_derive_raw_secret
};

enum blk_crypto_mode_num cqhci_blk_crypto_qti_mode_num_for_alg_dusize(
	enum cqhci_crypto_alg cqhci_crypto_alg,
	enum cqhci_crypto_key_size key_size)
{
	/*
	 * Currently the only mode that eMMC and blk-crypto both support.
	 */
	if (cqhci_crypto_alg == CQHCI_CRYPTO_ALG_AES_XTS &&
		key_size == CQHCI_CRYPTO_KEY_SIZE_256)
		return BLK_ENCRYPTION_MODE_AES_256_XTS;

	return BLK_ENCRYPTION_MODE_INVALID;
}

int cqhci_host_init_crypto_qti_spec(struct cqhci_host *host,
				    const struct keyslot_mgmt_ll_ops *ksm_ops)
{
	int cap_idx = 0;
	int err = 0;
	unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX];
	enum blk_crypto_mode_num blk_mode_num;

	/* Default to disabling crypto */
	host->caps &= ~CQHCI_CAP_CRYPTO_SUPPORT;

	if (!(cqhci_readl(host, CQHCI_CAP) & CQHCI_CAP_CS)) {
		pr_debug("%s no crypto capability\n", __func__);
		err = -ENODEV;
		goto out;
	}

	/*
	 * Crypto Capabilities should never be 0, because the
	 * config_array_ptr > 04h. So we use a 0 value to indicate that
	 * crypto init failed, and can't be enabled.
	 */
	host->crypto_capabilities.reg_val = cqhci_readl(host, CQHCI_CCAP);
	host->crypto_cfg_register =
		(u32)host->crypto_capabilities.config_array_ptr * 0x100;
	host->crypto_cap_array =
		devm_kcalloc(mmc_dev(host->mmc),
				host->crypto_capabilities.num_crypto_cap,
				sizeof(host->crypto_cap_array[0]), GFP_KERNEL);
	if (!host->crypto_cap_array) {
		err = -ENOMEM;
		pr_err("%s failed to allocate memory\n", __func__);
		goto out;
	}

	memset(crypto_modes_supported, 0, sizeof(crypto_modes_supported));

	/*
	 * Store all the capabilities now so that we don't need to repeatedly
	 * access the device each time we want to know its capabilities
	 */
	for (cap_idx = 0; cap_idx < host->crypto_capabilities.num_crypto_cap;
	     cap_idx++) {
		host->crypto_cap_array[cap_idx].reg_val =
			cpu_to_le32(cqhci_readl(host,
						 CQHCI_CRYPTOCAP +
						 cap_idx * sizeof(__le32)));
		blk_mode_num = cqhci_blk_crypto_qti_mode_num_for_alg_dusize(
				host->crypto_cap_array[cap_idx].algorithm_id,
				host->crypto_cap_array[cap_idx].key_size);
		if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID)
			continue;
		crypto_modes_supported[blk_mode_num] |=
				host->crypto_cap_array[cap_idx].sdus_mask * 512;
	}

	host->ksm = keyslot_manager_create(cqhci_num_keyslots(host), ksm_ops,
					crypto_modes_supported, host);

	if (!host->ksm) {
		err = -ENOMEM;
		goto out;
	}
	/*
	 * If the host controller supports cryptographic operations, it uses
	 * 128-bit task descriptors; the upper 64 bits of each task descriptor
	 * are used to pass crypto-specific information.
	 */
	host->caps |= CQHCI_TASK_DESC_SZ_128;

	return 0;

out:
	/* Indicate that init failed by setting crypto_capabilities to 0 */
	host->crypto_capabilities.reg_val = 0;
	return err;
}

int cqhci_crypto_qti_init_crypto(struct cqhci_host *host,
				const struct keyslot_mgmt_ll_ops *ksm_ops)
{
	int err = 0;
	struct sdhci_host *sdhci = mmc_priv(host->mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct resource *cqhci_ice_memres = NULL;

	cqhci_ice_memres = platform_get_resource_byname(msm_host->pdev,
							IORESOURCE_MEM,
							"cqhci_ice");
	if (!cqhci_ice_memres) {
		pr_debug("%s ICE not supported\n", __func__);
		host->icemmio = NULL;
		/* platform_get_resource_byname() returns NULL, not an ERR_PTR */
		return -ENODEV;
	}

	host->icemmio = devm_ioremap(&msm_host->pdev->dev,
				     cqhci_ice_memres->start,
				     resource_size(cqhci_ice_memres));
	if (!host->icemmio) {
		pr_err("%s failed to remap ice regs\n", __func__);
		/* devm_ioremap() returns NULL on failure, not an ERR_PTR */
		return -ENOMEM;
	}

	err = cqhci_host_init_crypto_qti_spec(host, &cqhci_crypto_qti_ksm_ops);
	if (err) {
		pr_err("%s: Error initiating crypto capabilities, err %d\n",
					__func__, err);
		return err;
	}

	err = crypto_qti_init_crypto(&msm_host->pdev->dev,
			host->icemmio, (void **)&host->crypto_vops->priv);
	if (err) {
		pr_err("%s: Error initiating crypto, err %d\n",
					__func__, err);
	}
	return err;
}

int cqhci_crypto_qti_debug(struct cqhci_host *host)
{
	return crypto_qti_debug(host->crypto_vops->priv);
}

void cqhci_crypto_qti_set_vops(struct cqhci_host *host)
{
	return cqhci_crypto_set_vops(host, &cqhci_crypto_qti_variant_ops);
}

int cqhci_crypto_qti_resume(struct cqhci_host *host)
{
	return crypto_qti_resume(host->crypto_vops->priv);
}
drivers/mmc/host/cqhci-crypto-qti.h +26 −0
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

#ifndef _CQHCI_CRYPTO_QTI_H
#define _CQHCI_CRYPTO_QTI_H

#include "cqhci-crypto.h"

void cqhci_crypto_qti_enable(struct cqhci_host *host);

void cqhci_crypto_qti_disable(struct cqhci_host *host);

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
int cqhci_crypto_qti_init_crypto(struct cqhci_host *host,
				 const struct keyslot_mgmt_ll_ops *ksm_ops);
#endif

int cqhci_crypto_qti_debug(struct cqhci_host *host);

void cqhci_crypto_qti_set_vops(struct cqhci_host *host);

int cqhci_crypto_qti_resume(struct cqhci_host *host);

#endif /* _CQHCI_CRYPTO_QTI_H */
drivers/mmc/host/sdhci-msm.c +8 −0
@@ -36,6 +36,7 @@
#include "sdhci-msm.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"
#include "cqhci-crypto-qti.h"

#define QOS_REMOVE_DELAY_MS	10
#define CORE_POWER		0x0
@@ -2441,6 +2442,13 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
	msm_host->cq_host = cq_host;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
	/*
	 * Set the vendor-specific ops needed for ICE. The default
	 * implementation is used if these ops are not set.
	 */
#ifdef CONFIG_MMC_CQHCI_CRYPTO_QTI
	cqhci_crypto_qti_set_vops(cq_host);
#endif

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret) {