Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 98497bb2 authored by John W. Linville's avatar John W. Linville
Browse files

Merge branch 'for-linville' of git://github.com/kvalo/ath

parents b75ff5e8 763b8cd3
Loading
Loading
Loading
Loading
+148 −143
Original line number Diff line number Diff line
@@ -76,36 +76,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *indicator_addr;

	if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
		return;
	}

	/* workaround for QCA988x_1.0 HW CE */
	indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;

	if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
		iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
	} else {
		unsigned long irq_flags;
		local_irq_save(irq_flags);
		iowrite32(1, indicator_addr);

		/*
		 * PCIE write waits for ACK in IPQ8K, there is no
		 * need to read back value.
		 */
		(void)ioread32(indicator_addr);
		(void)ioread32(indicator_addr); /* conservative */

		ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);

		iowrite32(0, indicator_addr);
		local_irq_restore(irq_flags);
	}
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
@@ -285,7 +256,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
 * ath10k_ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
static int ath10k_ce_send_nolock(struct ce_state *ce_state,
static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
				 void *per_transfer_context,
				 u32 buffer,
				 unsigned int nbytes,
@@ -293,7 +264,7 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
				 unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ce_ring_state *src_ring = ce_state->src_ring;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, *sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
@@ -306,7 +277,9 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
		ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	ath10k_pci_wake(ar);
	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
@@ -346,7 +319,7 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
	return ret;
}

int ath10k_ce_send(struct ce_state *ce_state,
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
@@ -378,12 +351,12 @@ void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
	sendlist->num_items++;
}

int ath10k_ce_sendlist_send(struct ce_state *ce_state,
int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
			    void *per_transfer_context,
			    struct ce_sendlist *sendlist,
			    unsigned int transfer_id)
{
	struct ce_ring_state *src_ring = ce_state->src_ring;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_sendlist_item *item;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -431,11 +404,11 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
	return ret;
}

int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
			       void *per_recv_context,
			       u32 buffer)
{
	struct ce_ring_state *dest_ring = ce_state->dest_ring;
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -448,7 +421,9 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	ath10k_pci_wake(ar);
	ret = ath10k_pci_wake(ar);
	if (ret)
		goto out;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -470,6 +445,8 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
		ret = -EIO;
	}
	ath10k_pci_sleep(ar);

out:
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
@@ -479,14 +456,14 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp,
						unsigned int *flagsp)
{
	struct ce_ring_state *dest_ring = ce_state->dest_ring;
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

@@ -535,7 +512,7 @@ static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
	return 0;
}

int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
@@ -556,11 +533,11 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
	return ret;
}

int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ce_ring_state *dest_ring;
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
@@ -612,19 +589,20 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp)
{
	struct ce_ring_state *src_ring = ce_state->src_ring;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	struct ce_desc *sdesc, *sbase;
	unsigned int read_index;
	int ret = -EIO;
	int ret;

	if (src_ring->hw_index == sw_index) {
		/*
@@ -634,17 +612,25 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		ath10k_pci_wake(ar);

		ret = ath10k_pci_wake(ar);
		if (ret)
			return ret;

		src_ring->hw_index =
			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		src_ring->hw_index &= nentries_mask;

		ath10k_pci_sleep(ar);
	}

	read_index = src_ring->hw_index;

	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
		struct ce_desc *sbase = src_ring->shadow_base;
		struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
	if ((read_index == sw_index) || (read_index == 0xffffffff))
		return -EIO;

	sbase = src_ring->shadow_base;
	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

	/* Return data from completed source descriptor */
	*bufferp = __le32_to_cpu(sdesc->addr);
@@ -662,20 +648,18 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;
		ret = 0;
	}

	return ret;
	return 0;
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ce_ring_state *src_ring;
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
@@ -727,7 +711,7 @@ int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
	return ret;
}

int ath10k_ce_completed_send_next(struct ce_state *ce_state,
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
@@ -756,15 +740,19 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;
	void *transfer_context;
	u32 buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int flags;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	ath10k_pci_wake(ar);
	spin_lock_bh(&ar_pci->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
@@ -823,10 +811,13 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id;
	int ce_id, ret;
	u32 intr_summary;

	ath10k_pci_wake(ar);
	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
@@ -849,13 +840,16 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
						int disable_copy_compl_intr)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	int ret;

	ath10k_pci_wake(ar);
	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
@@ -871,11 +865,14 @@ static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id;
	int ce_id, ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	ath10k_pci_wake(ar);
	for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
		struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
		struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
		u32 ctrl_addr = ce_state->ctrl_addr;

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
@@ -883,8 +880,8 @@ void ath10k_ce_disable_interrupts(struct ath10k *ar)
	ath10k_pci_sleep(ar);
}

void ath10k_ce_send_cb_register(struct ce_state *ce_state,
				void (*send_cb) (struct ce_state *ce_state,
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*send_cb)(struct ath10k_ce_pipe *ce_state,
						void *transfer_context,
						u32 buffer,
						unsigned int nbytes,
@@ -900,8 +897,8 @@ void ath10k_ce_send_cb_register(struct ce_state *ce_state,
	spin_unlock_bh(&ar_pci->ce_lock);
}

void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
				void (*recv_cb) (struct ce_state *ce_state,
void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*recv_cb)(struct ath10k_ce_pipe *ce_state,
						void *transfer_context,
						u32 buffer,
						unsigned int nbytes,
@@ -919,11 +916,11 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,

static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   struct ce_state *ce_state,
				   struct ath10k_ce_pipe *ce_state,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_ring_state *src_ring;
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries = attr->src_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -937,19 +934,18 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
		return 0;
	}

	ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->src_ring = (struct ce_ring_state *)ptr;
	ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
	src_ring = ce_state->src_ring;

	ptr += sizeof(struct ce_ring_state);
	ptr += sizeof(struct ath10k_ce_ring);
	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	ath10k_pci_wake(ar);
	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;
@@ -957,7 +953,6 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;
	ath10k_pci_sleep(ar);

	src_ring->per_transfer_context = (void **)ptr;

@@ -970,6 +965,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
				     (nentries * sizeof(struct ce_desc) +
				      CE_DESC_RING_ALIGN),
				     &base_addr);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(ce_state->src_ring);
		ce_state->src_ring = NULL;
		return -ENOMEM;
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
@@ -986,12 +987,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
	src_ring->shadow_base_unaligned =
		kmalloc((nentries * sizeof(struct ce_desc) +
			 CE_DESC_RING_ALIGN), GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned) {
		pci_free_consistent(ar_pci->pdev,
				    (nentries * sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    src_ring->base_addr_owner_space,
				    src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
		ce_state->src_ring = NULL;
		return -ENOMEM;
	}

	src_ring->shadow_base = PTR_ALIGN(
			src_ring->shadow_base_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_pci_wake(ar);
	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
@@ -999,18 +1009,17 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
	ath10k_pci_sleep(ar);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    struct ce_state *ce_state,
				    struct ath10k_ce_pipe *ce_state,
				    const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_ring_state *dest_ring;
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries = attr->dest_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -1024,25 +1033,23 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
		return 0;
	}

	ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->dest_ring = (struct ce_ring_state *)ptr;
	ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
	dest_ring = ce_state->dest_ring;

	ptr += sizeof(struct ce_ring_state);
	ptr += sizeof(struct ath10k_ce_ring);
	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	ath10k_pci_wake(ar);
	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;
	ath10k_pci_sleep(ar);

	dest_ring->per_transfer_context = (void **)ptr;

@@ -1055,6 +1062,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				     (nentries * sizeof(struct ce_desc) +
				      CE_DESC_RING_ALIGN),
				     &base_addr);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(ce_state->dest_ring);
		ce_state->dest_ring = NULL;
		return -ENOMEM;
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	/*
@@ -1071,44 +1084,31 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_pci_wake(ar);
	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
	ath10k_pci_sleep(ar);

	return 0;
}

static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
					     unsigned int ce_id,
					     const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_state = NULL;
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	spin_lock_bh(&ar_pci->ce_lock);

	if (!ar_pci->ce_id_to_state[ce_id]) {
		ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
		if (ce_state == NULL) {
			spin_unlock_bh(&ar_pci->ce_lock);
			return NULL;
		}

		ar_pci->ce_id_to_state[ce_id] = ce_state;
	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ctrl_addr;
		ce_state->state = CE_RUNNING;
		/* Save attribute flags */
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

@@ -1122,12 +1122,17 @@ static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
struct ce_state *ath10k_ce_init(struct ath10k *ar,
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
				unsigned int ce_id,
				const struct ce_attr *attr)
{
	struct ce_state *ce_state;
	struct ath10k_ce_pipe *ce_state;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return NULL;

	ce_state = ath10k_ce_init_state(ar, ce_id, attr);
	if (!ce_state) {
@@ -1136,40 +1141,38 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
	}

	if (attr->src_nentries) {
		if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
			ath10k_err("Failed to initialize CE src ring for ID: %d\n",
				   ce_id);
		ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			ath10k_ce_deinit(ce_state);
			return NULL;
		}
	}

	if (attr->dest_nentries) {
		if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
			ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
				   ce_id);
		ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			ath10k_ce_deinit(ce_state);
			return NULL;
		}
	}

	/* Enable CE error interrupts */
	ath10k_pci_wake(ar);
	ath10k_ce_error_intr_enable(ar, ctrl_addr);

	ath10k_pci_sleep(ar);

	return ce_state;
}

void ath10k_ce_deinit(struct ce_state *ce_state)
void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
{
	unsigned int ce_id = ce_state->id;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ce_state->state = CE_UNUSED;
	ar_pci->ce_id_to_state[ce_id] = NULL;

	if (ce_state->src_ring) {
		kfree(ce_state->src_ring->shadow_base_unaligned);
		pci_free_consistent(ar_pci->pdev,
@@ -1190,5 +1193,7 @@ void ath10k_ce_deinit(struct ce_state *ce_state)
				    ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}
	kfree(ce_state);

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}
+29 −45
Original line number Diff line number Diff line
@@ -36,16 +36,9 @@
 * how to use copy engines.
 */

struct ce_state;
struct ath10k_ce_pipe;


/* Copy Engine operational state */
enum ce_op_state {
	CE_UNUSED,
	CE_PAUSED,
	CE_RUNNING,
};

#define CE_DESC_FLAGS_GATHER         (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
@@ -57,8 +50,7 @@ struct ce_desc {
	__le16 flags; /* %CE_DESC_FLAGS_ */
};

/* Copy Engine Ring internal state */
struct ce_ring_state {
struct ath10k_ce_ring {
	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	unsigned int nentries_mask;
@@ -116,22 +108,20 @@ struct ce_ring_state {
	void **per_transfer_context;
};

/* Copy Engine internal state */
struct ce_state {
struct ath10k_ce_pipe {
	struct ath10k *ar;
	unsigned int id;

	unsigned int attr_flags;

	u32 ctrl_addr;
	enum ce_op_state state;

	void (*send_cb) (struct ce_state *ce_state,
	void (*send_cb) (struct ath10k_ce_pipe *ce_state,
			 void *per_transfer_send_context,
			 u32 buffer,
			 unsigned int nbytes,
			 unsigned int transfer_id);
	void (*recv_cb) (struct ce_state *ce_state,
	void (*recv_cb) (struct ath10k_ce_pipe *ce_state,
			 void *per_transfer_recv_context,
			 u32 buffer,
			 unsigned int nbytes,
@@ -139,8 +129,8 @@ struct ce_state {
			 unsigned int flags);

	unsigned int src_sz_max;
	struct ce_ring_state *src_ring;
	struct ce_ring_state *dest_ring;
	struct ath10k_ce_ring *src_ring;
	struct ath10k_ce_ring *dest_ring;
};

struct ce_sendlist_item {
@@ -182,7 +172,7 @@ struct ce_attr;
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ath10k_ce_send(struct ce_state *ce_state,
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_send_context,
		   u32 buffer,
		   unsigned int nbytes,
@@ -190,8 +180,8 @@ int ath10k_ce_send(struct ce_state *ce_state,
		   unsigned int transfer_id,
		   unsigned int flags);

void ath10k_ce_send_cb_register(struct ce_state *ce_state,
				void (*send_cb) (struct ce_state *ce_state,
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*send_cb)(struct ath10k_ce_pipe *ce_state,
						void *transfer_context,
						u32 buffer,
						unsigned int nbytes,
@@ -215,7 +205,7 @@ void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ath10k_ce_sendlist_send(struct ce_state *ce_state,
int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
			    void *per_transfer_send_context,
			    struct ce_sendlist *sendlist,
			    /* 14 bits */
@@ -233,12 +223,12 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
			       void *per_transfer_recv_context,
			       u32 buffer);

void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
				void (*recv_cb) (struct ce_state *ce_state,
void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*recv_cb)(struct ath10k_ce_pipe *ce_state,
						void *transfer_context,
						u32 buffer,
						unsigned int nbytes,
@@ -253,7 +243,7 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
 * Supply data for the next completed unprocessed receive descriptor.
 * Pops buffer from Dest ring.
 */
int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
@@ -263,7 +253,7 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
 * Supply data for the next completed unprocessed send descriptor.
 * Pops 1 completed send buffer from Source ring.
 */
int ath10k_ce_completed_send_next(struct ce_state *ce_state,
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
			   void **per_transfer_contextp,
			   u32 *bufferp,
			   unsigned int *nbytesp,
@@ -272,7 +262,7 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
/*==================CE Engine Initialization=======================*/

/* Initialize an instance of a CE */
struct ce_state *ath10k_ce_init(struct ath10k *ar,
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
				unsigned int ce_id,
				const struct ce_attr *attr);

@@ -282,7 +272,7 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
 * receive buffers.  Target DMA must be stopped before using
 * this API.
 */
int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp);

@@ -291,13 +281,13 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
 * pending sends.  Target DMA must be stopped before using
 * this API.
 */
int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp);

void ath10k_ce_deinit(struct ce_state *ce_state);
void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);

/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
@@ -322,9 +312,6 @@ struct ce_attr {
	/* CE_ATTR_* values */
	unsigned int flags;

	/* currently not in use */
	unsigned int priority;

	/* #entries in source ring - Must be a power of 2 */
	unsigned int src_nentries;

@@ -336,9 +323,6 @@ struct ce_attr {

	/* #entries in destination ring - Must be a power of 2 */
	unsigned int dest_nentries;

	/* Future use */
	void *reserved;
};

/*
+34 −12
Original line number Diff line number Diff line
@@ -38,17 +38,6 @@ MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");

static const struct ath10k_hw_params ath10k_hw_params_list[] = {
	{
		.id = QCA988X_HW_1_0_VERSION,
		.name = "qca988x hw1.0",
		.patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
		.fw = {
			.dir = QCA988X_HW_1_0_FW_DIR,
			.fw = QCA988X_HW_1_0_FW_FILE,
			.otp = QCA988X_HW_1_0_OTP_FILE,
			.board = QCA988X_HW_1_0_BOARD_DATA_FILE,
		},
	},
	{
		.id = QCA988X_HW_2_0_VERSION,
		.name = "qca988x hw2.0",
@@ -717,10 +706,43 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
	return 0;
}

int ath10k_core_register(struct ath10k *ar)
/* Validate the chip id read from the device before bringing the core up.
 *
 * Returns 0 when the hardware revision is supported (or unknown, in which
 * case only a warning is logged and probing continues), or -EOPNOTSUPP for
 * the unsupported qca988x hw1.0 silicon.
 */
static int ath10k_core_check_chip_id(struct ath10k *ar)
{
	u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);

	/* Check that we are not using hw1.0 (some of them have same pci id
	 * as hw2.0) before doing anything else as ath10k crashes horribly
	 * due to missing hw1.0 workarounds. */
	switch (hw_revision) {
	case QCA988X_HW_1_0_CHIP_ID_REV:
		ath10k_err("ERROR: qca988x hw1.0 is not supported\n");
		return -EOPNOTSUPP;

	case QCA988X_HW_2_0_CHIP_ID_REV:
		/* known hardware revision, continue normally */
		return 0;

	default:
		/* unknown revision: warn but let the driver try anyway */
		ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n",
			    ar->chip_id);
		return 0;
	}
}

int ath10k_core_register(struct ath10k *ar, u32 chip_id)
{
	int status;

	ar->chip_id = chip_id;

	status = ath10k_core_check_chip_id(ar);
	if (status) {
		ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
		return status;
	}

	status = ath10k_core_probe_fw(ar);
	if (status) {
		ath10k_err("could not probe fw (%d)\n", status);
+12 −1
Original line number Diff line number Diff line
@@ -270,12 +270,21 @@ enum ath10k_state {
	ATH10K_STATE_WEDGED,
};

/* Firmware feature flags advertised by the running firmware image.
 * Values are bit positions into the fw_features bitmap in struct ath10k,
 * so existing entries must never be renumbered; add new flags before
 * ATH10K_FW_FEATURE_COUNT. */
enum ath10k_fw_features {
	/* wmi_mgmt_rx_hdr contains extra RSSI information */
	ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,

	/* keep last */
	ATH10K_FW_FEATURE_COUNT,
};

struct ath10k {
	struct ath_common ath_common;
	struct ieee80211_hw *hw;
	struct device *dev;
	u8 mac_addr[ETH_ALEN];

	u32 chip_id;
	u32 target_version;
	u8 fw_version_major;
	u32 fw_version_minor;
@@ -288,6 +297,8 @@ struct ath10k {
	u32 vht_cap_info;
	u32 num_rf_chains;

	DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);

	struct targetdef *targetdef;
	struct hostdef *hostdef;

@@ -393,7 +404,7 @@ void ath10k_core_destroy(struct ath10k *ar);

int ath10k_core_start(struct ath10k *ar);
void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar, u32 chip_id);
void ath10k_core_unregister(struct ath10k *ar);

#endif /* _CORE_H_ */
+22 −1

File changed.

Preview size limit exceeded, changes collapsed.

Loading