Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1caf99c7 authored by Gilad Broner's avatar Gilad Broner Committed by Gerrit - the friendly Code Review server
Browse files

scsi: ufs: add crypto related operations



UFS specification has been updated and crypto operations
were added. Update the UFS driver code with the new registers
and sequences to facilitate future usage.

Change-Id: I020870f628977c3ebad6cc0afaef3cb1cdd15063
Signed-off-by: default avatarGilad Broner <gbroner@codeaurora.org>
parent abae80de
Loading
Loading
Loading
Loading
+29 −0
Original line number Diff line number Diff line
@@ -210,6 +210,35 @@ static inline bool ufs_qcom_is_data_cmd(char cmd_op, bool is_write)
	return false;
}

/*
 * ufs_qcom_ice_req_setup - query the ICE driver for per-request crypto info.
 * @qcom_host: QCom host controller instance owning the ICE device
 * @cmd:       SCSI command being prepared
 * @cc_index:  out: crypto configuration (key) index, set only when valid
 * @enable:    out: whether crypto should be enabled for this request;
 *             left untouched for non-data commands
 *
 * Returns 0 on success (including when no config callback is registered),
 * or the error returned by the ICE config vop.
 */
int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
		struct scsi_cmnd *cmd, u8 *cc_index, bool *enable)
{
	struct ice_data_setting ice_set;
	char opcode = cmd->cmnd[0];
	int ret;

	/* Nothing to do when the ICE vendor ops provide no config callback */
	if (!qcom_host->ice.vops->config)
		return 0;

	memset(&ice_set, 0, sizeof(ice_set));
	ret = qcom_host->ice.vops->config(qcom_host->ice.pdev,
			cmd->request, &ice_set);
	if (ret) {
		dev_err(qcom_host->hba->dev,
			"%s: error in ice_vops->config %d\n",
			__func__, ret);
		return ret;
	}

	/* Crypto is enabled only when the matching bypass flag is clear */
	if (ufs_qcom_is_data_cmd(opcode, true))
		*enable = !ice_set.encr_bypass;
	else if (ufs_qcom_is_data_cmd(opcode, false))
		*enable = !ice_set.decr_bypass;

	if (ice_set.crypto_data.key_index >= 0)
		*cc_index = (u8)ice_set.crypto_data.key_index;

	return 0;
}

/**
 * ufs_qcom_ice_cfg() - configures UFS's ICE registers for an ICE transaction
 * @qcom_host:	Pointer to a UFS QCom internal host structure.
+2 −0
Original line number Diff line number Diff line
@@ -72,6 +72,8 @@ enum {
#ifdef CONFIG_SCSI_UFS_QCOM_ICE
int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
			   struct scsi_cmnd *cmd, u8 *cc_index, bool *enable);
int ufs_qcom_ice_cfg(struct ufs_qcom_host *qcom_host, struct scsi_cmnd *cmd);
int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host);
+25 −1
Original line number Diff line number Diff line
@@ -374,7 +374,6 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);

		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
@@ -677,6 +676,30 @@ out:
	return ret;
}

/*
 * ufs_qcom_crypto_req_setup - gather crypto parameters for a transfer request.
 * @hba:      per-adapter instance
 * @lrbp:     local reference block for the request being prepared
 * @cc_index: out: crypto configuration index (filled by the ICE layer)
 * @enable:   out: whether crypto should be enabled for this request
 * @dun:      out: data unit number; derived from the request's start LBA
 *
 * Returns 0 for requests with no block-layer request attached, otherwise
 * the result of ufs_qcom_ice_req_setup().
 */
static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
	struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct request *req;
	int err;

	/* Only SCSI commands carrying a block request need crypto setup */
	if (!lrbp->cmd || !lrbp->cmd->request)
		return 0;
	req = lrbp->cmd->request;

	/* Use request LBA as the DUN value */
	if (req->bio)
		*dun = req->bio->bi_iter.bi_sector;

	err = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
	if (err)
		dev_err(hba->dev, "%s: ufs_qcom_ice_req_setup failed (%d)\n",
			__func__, err);

	return err;
}

static
int ufs_qcom_crytpo_engine_cfg(struct ufs_hba *hba, unsigned int task_tag)
{
@@ -2245,6 +2268,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
};

static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
	.crypto_req_setup	= ufs_qcom_crypto_req_setup,
	.crypto_engine_cfg	  = ufs_qcom_crytpo_engine_cfg,
	.crypto_engine_reset	  = ufs_qcom_crytpo_engine_reset,
	.crypto_engine_get_status = ufs_qcom_crypto_engine_get_status,
+55 −5
Original line number Diff line number Diff line
@@ -737,6 +737,9 @@ static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
	}

	if (!ufshcd_is_crypto_supported(hba))
		intr_mask &= ~CRYPTO_ENGINE_FATAL_ERROR;

	return intr_mask;
}

@@ -1000,7 +1003,11 @@ static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	u32 val = CONTROLLER_ENABLE;

	/*
	 * When the host advertises crypto support, enable the crypto
	 * engine in the same register write that starts the controller.
	 * Note: the stale pre-patch unconditional write of
	 * CONTROLLER_ENABLE was removed; only this single write remains.
	 */
	if (ufshcd_is_crypto_supported(hba))
		val |= CRYPTO_GENERAL_ENABLE;
	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}

/**
@@ -2265,15 +2272,52 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/*
 * ufshcd_prepare_crypto_utrd - fill the crypto fields of a transfer descriptor.
 * @hba:  per-adapter instance
 * @lrbp: local reference block whose UTRD is being prepared
 *
 * Queries the variant crypto ops for enable/config-index/DUN; when crypto is
 * enabled for the request, sets the crypto-enable bit plus config index in
 * dword_0 and writes the 64-bit DUN into dword_1/dword_3.
 *
 * Returns 0 when crypto is disabled or set up successfully, or the error
 * returned by the variant's crypto_req_setup callback.
 */
static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u8 cc_index = 0;
	bool enable = false;
	u64 dun = 0;
	int ret;

	/*
	 * Call vendor specific code to get crypto info for this request:
	 * enable, crypto config. index, DUN.
	 * If bypass is set, don't bother setting the other fields.
	 */
	ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun);
	if (ret) {
		dev_err(hba->dev,
			"%s: failed to setup crypto request (%d)\n",
			__func__, ret);
		return ret;
	}

	if (!enable)
		goto out;

	req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
	/*
	 * Guard against a NULL cmd (e.g. device management commands)
	 * before overriding the DUN with the request's start sector.
	 */
	if (lrbp->cmd && lrbp->cmd->request && lrbp->cmd->request->bio)
		dun = lrbp->cmd->request->bio->bi_iter.bi_sector;

	req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
	req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
out:
	return 0;
}

/**
 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
 * descriptor according to request
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: requests data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
		u32 *upiu_flags, enum dma_data_direction cmd_dir)
static void ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
	struct ufshcd_lrb *lrbp, u32 *upiu_flags,
	enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
@@ -2310,6 +2354,9 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
	req_desc->header.dword_3 = 0;

	req_desc->prd_table_length = 0;

	if (ufshcd_is_crypto_supported(hba))
		ufshcd_prepare_crypto_utrd(hba, lrbp);
}

/**
@@ -2412,7 +2459,7 @@ static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
	switch (lrbp->command_type) {
	case UTP_CMD_TYPE_SCSI:
		if (likely(lrbp->cmd)) {
			ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
			ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
					lrbp->cmd->sc_data_direction);
			ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
		} else {
@@ -2420,7 +2467,7 @@ static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
		}
		break;
	case UTP_CMD_TYPE_DEV_MANAGE:
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
		ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE);
		if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
			ufshcd_prepare_utp_query_req_upiu(
					hba, lrbp, upiu_flags);
@@ -4832,6 +4879,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	case OCS_DEVICE_FATAL_ERROR:
	case OCS_INVALID_CRYPTO_CONFIG:
	case OCS_GENERAL_CRYPTO_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
+22 −10
Original line number Diff line number Diff line
@@ -338,18 +338,15 @@ struct ufs_hba_variant_ops {

/**
 * struct ufs_hba_crypto_variant_ops - variant specific crypto callbacks
 * @crypto_req_setup:	retrieve the necessary cryptographic arguments to set up
 *			a request's transfer descriptor.
 * @crypto_engine_cfg: configure cryptographic engine according to tag parameter
 * @crypto_engine_eh: cryptographic engine error handling.
 *                Returns true if it detects an error, false on
 *                success
 * @crypto_engine_get_err: returns the saved error status of the
 *                         cryptographic engine. If a positive
 *                         value is returned, host controller
 *                         should be reset.
 * @crypto_engine_reset_err: resets the saved error status of
 *                         the cryptographic engine
 * @crypto_engine_reset: perform reset to the cryptographic engine
 * @crypto_engine_get_status: get errors status of the cryptographic engine
 */
struct ufs_hba_crypto_variant_ops {
	int	(*crypto_req_setup)(struct ufs_hba *, struct ufshcd_lrb *lrbp,
				    u8 *cc_index, bool *enable, u64 *dun);
	int	(*crypto_engine_cfg)(struct ufs_hba *, unsigned int);
	int	(*crypto_engine_reset)(struct ufs_hba *);
	int	(*crypto_engine_get_status)(struct ufs_hba *, u32 *);
@@ -925,6 +922,11 @@ static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
		return false;
}

/* Returns true when the host controller capabilities advertise inline crypto */
static inline bool ufshcd_is_crypto_supported(struct ufs_hba *hba)
{
	return (hba->capabilities & MASK_CRYPTO_SUPPORT) != 0;
}

#define ufshcd_writel(hba, val, reg)	\
	writel_relaxed((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
@@ -1213,6 +1215,16 @@ static inline void ufshcd_vops_remove_debugfs(struct ufs_hba *hba)
}
#endif

/*
 * Dispatch to the variant's crypto_req_setup callback when one is
 * registered; a missing variant or callback is treated as success.
 */
static inline int ufshcd_vops_crypto_req_setup(struct ufs_hba *hba,
	struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
{
	if (!hba->var || !hba->var->crypto_vops ||
	    !hba->var->crypto_vops->crypto_req_setup)
		return 0;

	return hba->var->crypto_vops->crypto_req_setup(hba, lrbp, cc_index,
						       enable, dun);
}

static inline int ufshcd_vops_crypto_engine_cfg(struct ufs_hba *hba,
		unsigned int task_tag)
{
Loading