
Commit 6dad4e8a authored by Atul Gupta, committed by Herbert Xu

chcr: Add support for Inline IPSec



Register xfrmdev_ops callbacks and send IPsec tunneled data to the
hardware for inline processing. In inline mode, the driver uses the
hardware crypto accelerator to encrypt transmitted packets and to
generate their ICVs.

Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
Signed-off-by: Harsh Jain <harsh@chelsio.com>
Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent a6ec572b
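
For context, a minimal sketch of the xfrmdev_ops registration pattern the commit message describes, written against the XFRM offload API of this kernel generation. The callback names, stub bodies and the chcr_enable_ipsec_offload() helper are illustrative assumptions only; the real implementation lives in chcr_ipsec.c (reached via the chcr_add_xfrmops() call visible in the chcr_core.c hunk further down), which is not among the hunks shown on this page.

/*
 * Illustrative sketch only, not part of this commit's hunks.
 * Shows how a driver registers xfrmdev_ops callbacks and advertises
 * ESP offload, as the commit message describes.
 */
#include <linux/netdevice.h>
#include <net/xfrm.h>

/* Accept an SA for offload; a real driver programs keys/ICV parameters
 * into the adapter here. */
static int chcr_xfrm_add_state(struct xfrm_state *x)
{
	return 0;
}

static void chcr_xfrm_del_state(struct xfrm_state *x)
{
	/* Remove the SA from the hardware SA table. */
}

static void chcr_xfrm_free_state(struct xfrm_state *x)
{
	/* Release any driver-private state attached to the SA. */
}

/* Return true if this skb/SA pair can be handled inline by the NIC. */
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	return true;
}

static const struct xfrmdev_ops chcr_xfrm_ops = {
	.xdo_dev_state_add	= chcr_xfrm_add_state,
	.xdo_dev_state_delete	= chcr_xfrm_del_state,
	.xdo_dev_state_free	= chcr_xfrm_free_state,
	.xdo_dev_offload_ok	= chcr_ipsec_offload_ok,
};

/* Hook the callbacks up and advertise ESP offload on the net_device
 * (hypothetical helper name; the driver's own entry point differs). */
static void chcr_enable_ipsec_offload(struct net_device *netdev)
{
	netdev->xfrmdev_ops = &chcr_xfrm_ops;
	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_enc_features |= NETIF_F_HW_ESP;
}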
+10 −0
@@ -18,3 +18,13 @@ config CRYPTO_DEV_CHELSIO

	  To compile this driver as a module, choose M here: the module
	  will be called chcr.

config CHELSIO_IPSEC_INLINE
        bool "Chelsio IPSec XFRM Tx crypto offload"
        depends on CHELSIO_T4
	depends on CRYPTO_DEV_CHELSIO
        depends on XFRM_OFFLOAD
        depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
        default n
        ---help---
          Enable support for IPSec Tx Inline.
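
As a usage note, the dependency chain above corresponds to a kernel configuration roughly along these lines (a sketch, not taken from the commit; either INET_ESP_OFFLOAD or INET6_ESP_OFFLOAD satisfies the ESP offload dependency, and XFRM_OFFLOAD is normally selected by those options):

# Sketch of the relevant .config fragment for this option
CONFIG_CHELSIO_T4=m
CONFIG_CRYPTO_DEV_CHELSIO=m
CONFIG_INET_ESP_OFFLOAD=m
CONFIG_CHELSIO_IPSEC_INLINE=y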
+1 −0
@@ -2,3 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4

obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs :=  chcr_core.o chcr_algo.o
chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o
+113 −108
@@ -73,6 +73,25 @@

#define IV AES_BLOCK_SIZE

unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88,
				96, 112, 120, 136, 144, 160, 168, 184,
				192, 208, 216, 232, 240, 256, 264, 280,
				288, 304, 312, 328, 336, 352, 360, 376};

unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80,
				112, 112, 128, 128, 144, 144, 160, 160,
				192, 192, 208, 208, 224, 224, 240, 240,
				272, 272, 288, 288, 304, 304, 320, 320};

static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err);

static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
@@ -108,18 +127,6 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
	return (skb->len <= SGE_MAX_WR_LEN);
}

/*
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
@@ -182,30 +189,17 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
	}
out:
	req->base.complete(&req->base, err);

}

static inline void chcr_handle_aead_resp(struct aead_request *req,
					 unsigned char *input,
					 int err)
static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));


	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
	if (reqctx->b0_dma)
		dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
				 reqctx->b0_len, DMA_BIDIRECTIONAL);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
	req->base.complete(&req->base, err);

}
static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -230,6 +224,25 @@ static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
		*err = 0;
}

static inline void chcr_handle_aead_resp(struct aead_request *req,
					 unsigned char *input,
					 int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));

	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
	if (reqctx->b0_dma)
		dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
				 reqctx->b0_len, DMA_BIDIRECTIONAL);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	req->base.complete(&req->base, err);
}

/*
 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 *	@req: crypto request
@@ -594,14 +607,6 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
	}
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
@@ -1100,7 +1105,6 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,

}


static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err)
{
@@ -2198,7 +2202,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
	return ERR_PTR(error);
}

static int chcr_aead_dma_map(struct device *dev,
int chcr_aead_dma_map(struct device *dev,
		      struct aead_request *req,
		      unsigned short op_type)
{
@@ -2242,7 +2246,7 @@ static int chcr_aead_dma_map(struct device *dev,
	return -ENOMEM;
}

static void chcr_aead_dma_unmap(struct device *dev,
void chcr_aead_dma_unmap(struct device *dev,
			 struct aead_request *req,
			 unsigned short op_type)
{
@@ -2269,7 +2273,7 @@ static void chcr_aead_dma_unmap(struct device *dev,
	}
}

static inline void chcr_add_aead_src_ent(struct aead_request *req,
void chcr_add_aead_src_ent(struct aead_request *req,
			   struct ulptx_sgl *ulptx,
			   unsigned int assoclen,
			   unsigned short op_type)
@@ -2304,7 +2308,7 @@ static inline void chcr_add_aead_src_ent(struct aead_request *req,
	}
}

static inline void chcr_add_aead_dst_ent(struct aead_request *req,
void chcr_add_aead_dst_ent(struct aead_request *req,
			   struct cpl_rx_phys_dsgl *phys_cpl,
			   unsigned int assoclen,
			   unsigned short op_type,
@@ -2326,7 +2330,7 @@ static inline void chcr_add_aead_dst_ent(struct aead_request *req,
	dsgl_walk_end(&dsgl_walk, qid);
}

static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
			     struct ulptx_sgl *ulptx,
			     struct  cipher_wr_param *wrparam)
{
@@ -2351,7 +2355,7 @@ static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
	}
}

static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
			     struct cpl_rx_phys_dsgl *phys_cpl,
			     struct  cipher_wr_param *wrparam,
			     unsigned short qid)
@@ -2369,7 +2373,7 @@ static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
	dsgl_walk_end(&dsgl_walk, qid);
}

static inline void chcr_add_hash_src_ent(struct ahash_request *req,
void chcr_add_hash_src_ent(struct ahash_request *req,
			   struct ulptx_sgl *ulptx,
			   struct hash_wr_param *param)
{
@@ -2398,8 +2402,7 @@ static inline void chcr_add_hash_src_ent(struct ahash_request *req,
	}
}


static inline int chcr_hash_dma_map(struct device *dev,
int chcr_hash_dma_map(struct device *dev,
		      struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
@@ -2415,7 +2418,7 @@ static inline int chcr_hash_dma_map(struct device *dev,
	return 0;
}

static inline void chcr_hash_dma_unmap(struct device *dev,
void chcr_hash_dma_unmap(struct device *dev,
			 struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
@@ -2429,8 +2432,7 @@ static inline void chcr_hash_dma_unmap(struct device *dev,

}


static int chcr_cipher_dma_map(struct device *dev,
int chcr_cipher_dma_map(struct device *dev,
			struct ablkcipher_request *req)
{
	int error;
@@ -2465,7 +2467,8 @@ static int chcr_cipher_dma_map(struct device *dev,
	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
	return -ENOMEM;
}
static void chcr_cipher_dma_unmap(struct device *dev,

void chcr_cipher_dma_unmap(struct device *dev,
			   struct ablkcipher_request *req)
{
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -3371,6 +3374,40 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
	aeadctx->enckey_len = 0;
	return -EINVAL;
}

static int chcr_aead_op(struct aead_request *req,
			unsigned short op_type,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx;
	struct sk_buff *skb;

	if (!a_ctx(tfm)->dev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}
	u_ctx = ULD_CTX(a_ctx(tfm));
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   a_ctx(tfm)->tx_qidx)) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
			   op_type);

	if (IS_ERR(skb) || !skb)
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -3423,38 +3460,6 @@ static int chcr_aead_decrypt(struct aead_request *req)
	}
}

static int chcr_aead_op(struct aead_request *req,
			  unsigned short op_type,
			  int size,
			  create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx;
	struct sk_buff *skb;

	if (!a_ctx(tfm)->dev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}
	u_ctx = ULD_CTX(a_ctx(tfm));
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   a_ctx(tfm)->tx_qidx)) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
			   op_type);

	if (IS_ERR(skb) || !skb)
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
+0 −15
@@ -226,15 +226,6 @@
#define SPACE_LEFT(len) \
	((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))

unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88,
				96, 112, 120, 136, 144, 160, 168, 184,
				192, 208, 216, 232, 240, 256, 264, 280,
				288, 304, 312, 328, 336, 352, 360, 376};
unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80,
				112, 112, 128, 128, 144, 144, 160, 160,
				192, 192, 208, 208, 224, 224, 240, 240,
				272, 272, 288, 288, 304, 304, 320, 320};

struct algo_param {
	unsigned int auth_mode;
	unsigned int mk_size;
@@ -404,10 +395,4 @@ static inline u32 aes_ks_subword(const u32 w)
	return *(u32 *)(&bytes[0]);
}

static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};

#endif /* __CHCR_ALGO_H__ */
+14 −0
@@ -48,6 +48,9 @@ static struct cxgb4_uld_info chcr_uld_info = {
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	.tx_handler = chcr_uld_tx_handler,
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
};

struct uld_ctx *assign_chcr_device(void)
@@ -164,6 +167,10 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
		goto out;
	}
	u_ctx->lldi = *lld;
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
		chcr_add_xfrmops(lld);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
out:
	return u_ctx;
}
@@ -187,6 +194,13 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
	return 0;
}

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
	return chcr_ipsec_xmit(skb, dev);
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;