
Commit a9f89463 authored by Kalle Valo
ath.git patches for 4.16. Major changes:

ath10k

* more preparation work for wcn3990 support

* add memory dump to firmware coredump files

wil6210

* support scheduled scan

* support 40-bit DMA addresses
parents f1398fd2 4c8cf8df
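
The ath10k diff below is mostly one refactor: descriptor handling is routed through a per-pipe ops table so that WCN3990-class targets can use 64-bit copy-engine descriptors while existing targets keep the 32-bit ones. A minimal, self-contained sketch of that dispatch pattern (simplified types and illustrative names, not the driver's actual struct ath10k_ce_ops):

#include <stdio.h>

struct ce_pipe;

struct ce_ops {
	int (*ce_send_nolock)(struct ce_pipe *pipe, unsigned long long paddr);
};

struct ce_pipe {
	const struct ce_ops *ops;
};

/* 32-bit descriptor path: only the low word of the address is used */
static int send_nolock_32(struct ce_pipe *pipe, unsigned long long paddr)
{
	(void)pipe;
	printf("ce_desc    addr=0x%08llx\n", paddr & 0xffffffffULL);
	return 0;
}

/* 64-bit descriptor path: the full bus address fits in the descriptor */
static int send_nolock_64(struct ce_pipe *pipe, unsigned long long paddr)
{
	(void)pipe;
	printf("ce_desc_64 addr=0x%010llx\n", paddr);
	return 0;
}

static const struct ce_ops ce_ops_32 = { .ce_send_nolock = send_nolock_32 };
static const struct ce_ops ce_ops_64 = { .ce_send_nolock = send_nolock_64 };

int main(void)
{
	/* this hw check stands in for ath10k_ce_set_ops() */
	int is_wcn3990 = 1;
	struct ce_pipe pipe = {
		.ops = is_wcn3990 ? &ce_ops_64 : &ce_ops_32,
	};

	/* callers never branch on descriptor width again */
	return pipe.ops->ce_send_nolock(&pipe, 0x12ffffff00ULL);
}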
+1 −0
@@ -21,6 +21,7 @@ ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
ath10k_core-$(CONFIG_THERMAL) += thermal.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
ath10k_core-$(CONFIG_PM) += wow.o
ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o

obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
+1 −1
/*
- * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2016-2017 Qualcomm Atheros, Inc. All rights reserved.
 * Copyright (c) 2015 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
+1 −1
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
+1 −1
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
+542 −94
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
@@ -327,9 +327,9 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
 * Guts of ath10k_ce_send.
 * The caller takes responsibility for any needed locking.
 */
-int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
-			  void *per_transfer_context,
-			  u32 buffer,
-			  unsigned int nbytes,
-			  unsigned int transfer_id,
-			  unsigned int flags)
+static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+				  void *per_transfer_context,
+				  dma_addr_t buffer,
+				  unsigned int nbytes,
+				  unsigned int transfer_id,
+				  unsigned int flags)
@@ -384,6 +384,87 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
	return ret;
}

static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
				     void *per_transfer_context,
				     dma_addr_t buffer,
				     unsigned int nbytes,
				     unsigned int transfer_id,
				     unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc_64 *desc, sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	__le32 *addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		return -ESHUTDOWN;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
				      write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;

	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	addr = (__le32 *)&sdesc.addr;

	flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
	addr[0] = __cpu_to_le32(buffer);
	addr[1] = __cpu_to_le32(flags);
	if (flags & CE_SEND_FLAG_GATHER)
		addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
	else
		addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));

	sdesc.nbytes = __cpu_to_le16(nbytes);
	sdesc.flags  = __cpu_to_le16(desc_flags);

	*desc = sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	return ret;
}
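
For reference, a hedged sketch of the address split performed above: lower_32_bits()/upper_32_bits()-style splitting of a wider-than-32-bit bus address across the two descriptor words (the example address is illustrative, and the driver's actual CE_DESC_FLAGS_GET_MASK handling is not reproduced here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t paddr = 0x12ffffff00ULL;	/* example 40-bit bus address */

	uint32_t lo = (uint32_t)paddr;		/* lower_32_bits(paddr) */
	uint32_t hi = (uint32_t)(paddr >> 32);	/* upper_32_bits(paddr) */

	/* the low word goes in addr[0]; the few high bits ride in addr[1]
	 * alongside the flag bits, as _ath10k_ce_send_nolock_64() does */
	printf("addr[0]=0x%08x high-bits=0x%02x\n", (unsigned)lo, (unsigned)hi);
	return 0;
}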

int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  dma_addr_t buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
}

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
@@ -413,7 +494,7 @@ void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
-		   u32 buffer,
+		   dma_addr_t buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
@@ -459,7 +540,8 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

-int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+				   dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
@@ -488,6 +570,39 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
	return 0;
}

static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
				      void *ctx,
				      dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
			CE_DEST_RING_TO_DESC_64(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	desc->addr = __cpu_to_le64(paddr);
	desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK);

	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}
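
The ring bookkeeping above relies on the ring size being a power of two, so nentries_mask = nentries - 1 lets the index wrap with a single AND. A small sketch of that index arithmetic; the macros below are re-declared with the same semantics as the driver's CE_RING_IDX_INCR()/CE_RING_DELTA(), under that power-of-two assumption:

#include <assert.h>

#define RING_IDX_INCR(mask, idx)	(((idx) + 1) & (mask))
#define RING_DELTA(mask, from, to)	(((to) - (from)) & (mask))

int main(void)
{
	unsigned int mask = 8 - 1;	/* 8-entry ring */
	unsigned int write_index = 7, sw_index = 3;

	/* write index wraps from 7 back to 0 */
	assert(RING_IDX_INCR(mask, write_index) == 0);
	/* free slots left before the ring is full: delta to sw_index - 1 */
	assert(RING_DELTA(mask, write_index, sw_index - 1) == 3);
	return 0;
}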

void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
{
	struct ath10k *ar = pipe->ar;
@@ -508,14 +623,15 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
	dest_ring->write_index = write_index;
}

-int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+			  dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
-	ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
+	ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
@@ -525,7 +641,8 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
-int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
-					 void **per_transfer_contextp,
-					 unsigned int *nbytesp)
+static int
+	 _ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+					       void **per_transfer_contextp,
+					       unsigned int *nbytesp)
{
@@ -574,6 +691,64 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
	return 0;
}

static int
_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
		CE_DEST_RING_TO_DESC_64(base, sw_index);
	struct ce_desc_64 sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/* This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
	 * So update the transfer context for all CEs except CE5.
	 */
	if (ce_state->id != 5)
		dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}
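
A simplified sketch of the completion check used above: snapshot the whole descriptor once, and treat nbytes == 0 as "not completed yet" so the race described in the comment resolves as a retry. Types are reduced to plain C for illustration; this is not the driver's descriptor layout:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct desc64 {
	uint64_t addr;
	uint16_t nbytes;
	uint16_t flags;
};

static int recv_next(struct desc64 *ring, unsigned int sw_index,
		     unsigned int *nbytesp)
{
	struct desc64 sdesc = ring[sw_index];	/* copy in one go */

	if (sdesc.nbytes == 0)
		return -EIO;	/* index update seen before descriptor write */

	ring[sw_index].nbytes = 0;		/* mark the slot consumed */
	*nbytesp = sdesc.nbytes;
	return 0;
}

int main(void)
{
	struct desc64 ring[4] = { [1] = { .addr = 0x1000, .nbytes = 42 } };
	unsigned int nbytes;

	printf("slot 0: %d\n", recv_next(ring, 0, &nbytes));	/* -EIO */
	printf("slot 1: %d (nbytes=%u)\n", recv_next(ring, 1, &nbytes), nbytes);
	return 0;
}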

int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_ctx,
					 unsigned int *nbytesp)
{
	return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
							    per_transfer_ctx,
							    nbytesp);
}

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  unsigned int *nbytesp)
@@ -583,17 +758,18 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
	int ret;

	spin_lock_bh(&ce->ce_lock);
-	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
+	ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   nbytesp);

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

-int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
-			       void **per_transfer_contextp,
-			       u32 *bufferp)
+static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+				       void **per_transfer_contextp,
+				       dma_addr_t *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
@@ -644,6 +820,69 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
	return ret;
}

static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
					  void **per_transfer_contextp,
					  dma_addr_t *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
		struct ce_desc_64 *desc =
			CE_DEST_RING_TO_DESC_64(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le64_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;
		desc->nbytes = 0;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       dma_addr_t *bufferp)
{
	return ce_state->ops->ce_revoke_recv_next(ce_state,
						  per_transfer_contextp,
						  bufferp);
}

/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
@@ -698,10 +937,45 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
	return 0;
}

static void ath10k_ce_extract_desc_data(struct ath10k *ar,
					struct ath10k_ce_ring *src_ring,
					u32 sw_index,
					dma_addr_t *bufferp,
					u32 *nbytesp,
					u32 *transfer_idp)
{
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
				   CE_DESC_FLAGS_META_DATA);
}

static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
					   struct ath10k_ce_ring *src_ring,
					   u32 sw_index,
					   dma_addr_t *bufferp,
					   u32 *nbytesp,
					   u32 *transfer_idp)
{
		struct ce_desc_64 *base = src_ring->base_addr_owner_space;
		struct ce_desc_64 *desc =
			CE_SRC_RING_TO_DESC_64(base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le64_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
				   CE_DESC_FLAGS_META_DATA);
}
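
The transfer id above is recovered with the driver's MS() shift-and-mask helper. A compile-and-run sketch of the MS()/SM() pattern; the MASK/LSB values below are made up for illustration, while the real CE_DESC_FLAGS_META_DATA parameters live in the hardware definitions:

#include <assert.h>
#include <stdint.h>

#define META_DATA_MASK	0x0ffc
#define META_DATA_LSB	2

#define SM(val, field)	(((val) << field##_LSB) & field##_MASK)	/* set */
#define MS(reg, field)	(((reg) & field##_MASK) >> field##_LSB)	/* get */

int main(void)
{
	uint16_t flags = SM(5, META_DATA);	/* pack transfer_id = 5 */

	assert(MS(flags, META_DATA) == 5);	/* unpack it again */
	return 0;
}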

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
-			       u32 *bufferp,
+			       dma_addr_t *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
@@ -728,14 +1002,9 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
-		struct ce_desc *base = src_ring->base_addr_owner_space;
-		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
-
-		/* Return data from completed source descriptor */
-		*bufferp = __le32_to_cpu(desc->addr);
-		*nbytesp = __le16_to_cpu(desc->nbytes);
-		*transfer_idp = MS(__le16_to_cpu(desc->flags),
-						CE_DESC_FLAGS_META_DATA);
+		ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
+						    bufferp, nbytesp,
+						    transfer_idp);

		if (per_transfer_contextp)
			*per_transfer_contextp =
@@ -897,6 +1166,10 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,

	nentries = roundup_pow_of_two(attr->src_nentries);

	if (ar->hw_params.target_64bit)
		memset(src_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc_64));
	else
		memset(src_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc));

@@ -934,6 +1207,10 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,

	nentries = roundup_pow_of_two(attr->dest_nentries);

	if (ar->hw_params.target_64bit)
		memset(dest_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc_64));
	else
		memset(dest_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc));

@@ -993,11 +1270,56 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,

	src_ring->base_addr_ce_space_unaligned = base_addr;

-	src_ring->base_addr_owner_space = PTR_ALIGN(
-			src_ring->base_addr_owner_space_unaligned,
+	src_ring->base_addr_owner_space =
+			PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
				  CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space =
			ALIGN(src_ring->base_addr_ce_space_unaligned,
			      CE_DESC_RING_ALIGN);

	return src_ring;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
			    const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(sizeof(*src_ring) +
			   (nentries *
			    sizeof(*src_ring->per_transfer_context)),
			   GFP_KERNEL);
	if (!src_ring)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc_64) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space =
			PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
				  CE_DESC_RING_ALIGN);
-	src_ring->base_addr_ce_space = ALIGN(
-			src_ring->base_addr_ce_space_unaligned,
+	src_ring->base_addr_ce_space =
+			ALIGN(src_ring->base_addr_ce_space_unaligned,
			      CE_DESC_RING_ALIGN);

	return src_ring;
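
Both ring allocators use the same over-allocate-and-align idiom: grab the ring plus CE_DESC_RING_ALIGN bytes of slack from dma_alloc_coherent(), then round the CPU and bus addresses up to the required alignment. A plain-C sketch of that idiom, with malloc() standing in for the coherent DMA allocation and a hand-rolled equivalent of the kernel's PTR_ALIGN():

#include <stdint.h>
#include <stdlib.h>

#define DESC_RING_ALIGN 8	/* illustrative; must be a power of two */

static void *ptr_align(void *p, uintptr_t a)
{
	return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
}

int main(void)
{
	size_t ring_sz = 64 * 16;	/* nentries * sizeof(descriptor) */
	/* stands in for dma_alloc_coherent(dev, ring_sz + align, ...) */
	void *unaligned = malloc(ring_sz + DESC_RING_ALIGN);
	void *owner_space = ptr_align(unaligned, DESC_RING_ALIGN);

	(void)owner_space;	/* descriptors would live here */
	free(unaligned);	/* the unaligned pointer is what gets freed */
	return 0;
}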
@@ -1039,11 +1361,62 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,

	dest_ring->base_addr_ce_space_unaligned = base_addr;

-	dest_ring->base_addr_owner_space = PTR_ALIGN(
-			dest_ring->base_addr_owner_space_unaligned,
+	dest_ring->base_addr_owner_space =
+			PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
				  CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space =
				ALIGN(dest_ring->base_addr_ce_space_unaligned,
				      CE_DESC_RING_ALIGN);

	return dest_ring;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
			     const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(sizeof(*dest_ring) +
			    (nentries *
			     sizeof(*dest_ring->per_transfer_context)),
			    GFP_KERNEL);
	if (!dest_ring)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc_64) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	/* Correctly initialize memory to 0 to prevent garbage
	 * data from crashing the system during firmware download
	 */
	memset(dest_ring->base_addr_owner_space_unaligned, 0,
	       nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);

	dest_ring->base_addr_owner_space =
			PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
				  CE_DESC_RING_ALIGN);
-	dest_ring->base_addr_ce_space = ALIGN(
-			dest_ring->base_addr_ce_space_unaligned,
+	dest_ring->base_addr_ce_space =
+			ALIGN(dest_ring->base_addr_ce_space_unaligned,
			      CE_DESC_RING_ALIGN);

	return dest_ring;
@@ -1107,65 +1480,36 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
	ath10k_ce_deinit_dest_ring(ar, ce_id);
}

-int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
-			 const struct ce_attr *attr)
+static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
-	int ret;

-	/*
-	 * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
-	 * additional TX locking checks.
-	 *
-	 * For the lack of a better place do the check here.
-	 */
-	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
-		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-	BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
-		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
-		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

-	ce_state->ar = ar;
-	ce_state->id = ce_id;
-	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
-	ce_state->attr_flags = attr->flags;
-	ce_state->src_sz_max = attr->src_sz_max;

-	if (attr->src_nentries)
-		ce_state->send_cb = attr->send_cb;

-	if (attr->dest_nentries)
-		ce_state->recv_cb = attr->recv_cb;

-	if (attr->src_nentries) {
-		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
-		if (IS_ERR(ce_state->src_ring)) {
-			ret = PTR_ERR(ce_state->src_ring);
-			ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
-				   ce_id, ret);
-			ce_state->src_ring = NULL;
-			return ret;
-		}
-	}
+	if (ce_state->src_ring) {
+		dma_free_coherent(ar->dev,
+				  (ce_state->src_ring->nentries *
+				   sizeof(struct ce_desc) +
+				   CE_DESC_RING_ALIGN),
+				  ce_state->src_ring->base_addr_owner_space,
+				  ce_state->src_ring->base_addr_ce_space);
+		kfree(ce_state->src_ring);
+	}

-	if (attr->dest_nentries) {
-		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
-								attr);
-		if (IS_ERR(ce_state->dest_ring)) {
-			ret = PTR_ERR(ce_state->dest_ring);
-			ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
-				   ce_id, ret);
-			ce_state->dest_ring = NULL;
-			return ret;
-		}
-	}
+	if (ce_state->dest_ring) {
+		dma_free_coherent(ar->dev,
+				  (ce_state->dest_ring->nentries *
+				   sizeof(struct ce_desc) +
+				   CE_DESC_RING_ALIGN),
+				  ce_state->dest_ring->base_addr_owner_space,
+				  ce_state->dest_ring->base_addr_ce_space);
+		kfree(ce_state->dest_ring);
+	}

-	return 0;
+	ce_state->src_ring = NULL;
+	ce_state->dest_ring = NULL;
}

-void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
@@ -1173,7 +1517,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
	if (ce_state->src_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
-				   sizeof(struct ce_desc) +
+				   sizeof(struct ce_desc_64) +
				   CE_DESC_RING_ALIGN),
				  ce_state->src_ring->base_addr_owner_space,
				  ce_state->src_ring->base_addr_ce_space);
@@ -1183,7 +1527,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
	if (ce_state->dest_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->dest_ring->nentries *
-				   sizeof(struct ce_desc) +
+				   sizeof(struct ce_desc_64) +
				   CE_DESC_RING_ALIGN),
				  ce_state->dest_ring->base_addr_owner_space,
				  ce_state->dest_ring->base_addr_ce_space);
@@ -1194,6 +1538,14 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
	ce_state->dest_ring = NULL;
}

void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

	ce_state->ops->ce_free_pipe(ar, ce_id);
}

void ath10k_ce_dump_registers(struct ath10k *ar,
			      struct ath10k_fw_crash_data *crash_data)
{
@@ -1232,3 +1584,99 @@ void ath10k_ce_dump_registers(struct ath10k *ar,

	spin_unlock_bh(&ce->ce_lock);
}

static const struct ath10k_ce_ops ce_ops = {
	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
	.ce_rx_post_buf = __ath10k_ce_rx_post_buf,
	.ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
	.ce_extract_desc_data = ath10k_ce_extract_desc_data,
	.ce_free_pipe = _ath10k_ce_free_pipe,
	.ce_send_nolock = _ath10k_ce_send_nolock,
};

static const struct ath10k_ce_ops ce_64_ops = {
	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
	.ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
	.ce_completed_recv_next_nolock =
				_ath10k_ce_completed_recv_next_nolock_64,
	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
	.ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
	.ce_free_pipe = _ath10k_ce_free_pipe_64,
	.ce_send_nolock = _ath10k_ce_send_nolock_64,
};

static void ath10k_ce_set_ops(struct ath10k *ar,
			      struct ath10k_ce_pipe *ce_state)
{
	switch (ar->hw_rev) {
	case ATH10K_HW_WCN3990:
		ce_state->ops = &ce_64_ops;
		break;
	default:
		ce_state->ops = &ce_ops;
		break;
	}
}

int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	int ret;

	ath10k_ce_set_ops(ar, ce_state);
	/* Make sure there's enough CE ringbuffer entries for HTT TX to avoid
	 * additional TX locking checks.
	 *
	 * For the lack of a better place do the check here.
	 */
	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;

	if (attr->src_nentries)
		ce_state->send_cb = attr->send_cb;

	if (attr->dest_nentries)
		ce_state->recv_cb = attr->recv_cb;

	if (attr->src_nentries) {
		ce_state->src_ring =
			ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
		if (IS_ERR(ce_state->src_ring)) {
			ret = PTR_ERR(ce_state->src_ring);
			ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
				   ce_id, ret);
			ce_state->src_ring = NULL;
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
									ce_id,
									attr);
		if (IS_ERR(ce_state->dest_ring)) {
			ret = PTR_ERR(ce_state->dest_ring);
			ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
				   ce_id, ret);
			ce_state->dest_ring = NULL;
			return ret;
		}
	}

	return 0;
}
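
The BUILD_BUG_ON() checks in ath10k_ce_alloc_pipe() above cost nothing at run time; they make the HTT TX ring-capacity requirement fail the build instead. A hedged user-space equivalent with C11 static_assert; the constants below are illustrative stand-ins, not the driver's actual values:

#include <assert.h>	/* static_assert (C11) */

#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192	/* illustrative value */
#define TARGET_NUM_MSDU_DESC        1424	/* illustrative value */

/* BUILD_BUG_ON(cond) fails when cond is true; assert the negation here */
static_assert(2 * TARGET_NUM_MSDU_DESC <= CE_HTT_H2T_MSG_SRC_NENTRIES - 1,
	      "HTT TX needs enough CE ring entries");

int main(void) { return 0; }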