
Commit 59e25676 authored by John W. Linville

Merge branch 'for-linville' of git://github.com/kvalo/ath

parents 7bb75da1 0fdc14e4
drivers/net/wireless/ath/ath10k/Kconfig +1 −0
@@ -25,6 +25,7 @@ config ATH10K_DEBUG
config ATH10K_DEBUGFS
	bool "Atheros ath10k debugfs support"
	depends on ATH10K
+	select RELAY
	---help---
	  Enabled debugfs support
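
Note: ATH10K_DEBUGFS now selects RELAY because the spectral scan support added in this merge streams samples to user space through relayfs. A minimal sketch of that mechanism, with hypothetical names rather than the driver's actual spectral code:

#include <linux/relay.h>
#include <linux/debugfs.h>

/* Illustrative only: open a relay channel rooted in debugfs and push
 * one sample into it. Sub-buffer sizing is arbitrary here, and "cb"
 * must provide at least a create_buf_file() callback so relay can
 * create the per-CPU debugfs files.
 */
static struct rchan *example_open_spectral_relay(struct dentry *dir,
						 struct rchan_callbacks *cb)
{
	return relay_open("spectral_scan0", dir, 130000, 4, cb, NULL);
}

static void example_push_sample(struct rchan *rc, const void *sample,
				size_t len)
{
	relay_write(rc, sample, len);	/* copied into current sub-buffer */
}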

drivers/net/wireless/ath/ath10k/Makefile +1 −0
@@ -10,6 +10,7 @@ ath10k_core-y += mac.o \
		 wmi.o \
		 bmi.o

+ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o

obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
drivers/net/wireless/ath/ath10k/bmi.c +26 −26
@@ -22,7 +22,7 @@

void ath10k_bmi_start(struct ath10k *ar)
{
	ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;
}
@@ -33,10 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

@@ -45,7 +45,7 @@ int ath10k_bmi_done(struct ath10k *ar)

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn("unable to write to the device: %d\n", ret);
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

@@ -61,10 +61,10 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn("BMI Get Target Info Command disallowed\n");
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

@@ -72,12 +72,12 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn("unable to get target info from device\n");
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn("invalid get_target_info response length (%d)\n",
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}
@@ -97,11 +97,11 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
	u32 rxlen;
	int ret;

	ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

@@ -115,7 +115,7 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn("unable to read from the device (%d)\n",
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}
@@ -137,11 +137,11 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
	u32 txlen;
	int ret;

	ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

@@ -159,7 +159,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn("unable to write to the device (%d)\n",
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}
@@ -183,11 +183,11 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

@@ -197,19 +197,19 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn("unable to read from the device\n");
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn("invalid execute response length (%d)\n",
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}
@@ -221,11 +221,11 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
	u32 txlen;
	int ret;

	ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

@@ -241,7 +241,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn("unable to write to the device\n");
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

@@ -258,11 +258,11 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

@@ -271,7 +271,7 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn("unable to Start LZ Stream to the device\n");
		ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
		return ret;
	}

@@ -286,7 +286,7 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
	u32 trailer_len = length - head_len;
	int ret;

-	ath10k_dbg(ATH10K_DBG_BMI,
+	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%p length %d\n",
		   address, buffer, length);
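
Note: the only change in bmi.c is that every ath10k_dbg()/ath10k_warn() call gains a struct ath10k *ar argument, so messages can be attributed to a specific device instance. A sketch of what such a device-aware wrapper can look like (illustrative; the struct and helper below are stand-ins, not the driver's actual debug code):

#include <linux/device.h>
#include <linux/kernel.h>

struct example_ath10k {
	struct device *dev;	/* the real struct ath10k carries this too */
};

/* Route the message through dev_warn() so it is prefixed with the
 * device name, which is what passing "ar" buys over a bare pr_warn().
 */
static void example_warn(struct example_ath10k *ar, const char *fmt, ...)
{
	struct va_format vaf = { .fmt = fmt };
	va_list args;

	va_start(args, fmt);
	vaf.va = &args;
	dev_warn(ar->dev, "%pV", &vaf);
	va_end(args);
}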

drivers/net/wireless/ath/ath10k/ce.c +66 −119
@@ -284,13 +284,9 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
		ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

-	ret = ath10k_pci_wake(ar);
-	if (ret)
-		return ret;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
@@ -325,7 +321,6 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,

	src_ring->write_index = write_index;
exit:
-	ath10k_pci_sleep(ar);
	return ret;
}

@@ -390,49 +385,57 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
	return delta;
}

-int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
-			       void *per_recv_context,
-			       u32 buffer)
+int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
-	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
-	u32 ctrl_addr = ce_state->ctrl_addr;
-	struct ath10k *ar = ce_state->ar;
+	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
-	unsigned int write_index;
-	unsigned int sw_index;
-	int ret;
+	unsigned int write_index = dest_ring->write_index;
+	unsigned int sw_index = dest_ring->sw_index;

-	spin_lock_bh(&ar_pci->ce_lock);
-	write_index = dest_ring->write_index;
-	sw_index = dest_ring->sw_index;
+	lockdep_assert_held(&ar_pci->ce_lock);

-	ret = ath10k_pci_wake(ar);
-	if (ret)
-		goto out;
+	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
+}

-	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
-		struct ce_desc *base = dest_ring->base_addr_owner_space;
-		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
+int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+{
+	struct ath10k *ar = pipe->ar;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+	unsigned int nentries_mask = dest_ring->nentries_mask;
+	unsigned int write_index = dest_ring->write_index;
+	unsigned int sw_index = dest_ring->sw_index;
+	struct ce_desc *base = dest_ring->base_addr_owner_space;
+	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
+	u32 ctrl_addr = pipe->ctrl_addr;

-		/* Update destination descriptor */
-		desc->addr    = __cpu_to_le32(buffer);
-		desc->nbytes = 0;
+	lockdep_assert_held(&ar_pci->ce_lock);

+	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+		return -EIO;

-		dest_ring->per_transfer_context[write_index] =
-							per_recv_context;
+	desc->addr = __cpu_to_le32(paddr);
+	desc->nbytes = 0;

-		/* Update Destination Ring Write Index */
-		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
-		ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
-		dest_ring->write_index = write_index;
-		ret = 0;
-	} else {
-		ret = -EIO;
-	}
+	dest_ring->per_transfer_context[write_index] = ctx;
+	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+	dest_ring->write_index = write_index;

-	ath10k_pci_sleep(ar);
+	return 0;
+}

-out:
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+{
+	struct ath10k *ar = pipe->ar;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	spin_lock_bh(&ar_pci->ce_lock);
+	ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
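
Note: the rx enqueue path is split into an unlocked __ath10k_ce_rx_post_buf(), whose locking contract is enforced with lockdep_assert_held(), and a thin locking wrapper. The general pattern, reduced to a self-contained sketch with invented names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_pending;

/* Unlocked variant: does the work; lockdep flags any caller that
 * forgot to take the lock.
 */
static int __example_post(int val)
{
	lockdep_assert_held(&example_lock);

	example_pending += val;
	return 0;
}

/* Locking wrapper: the entry point for callers not already holding
 * the lock, mirroring ath10k_ce_rx_post_buf() above.
 */
static int example_post(int val)
{
	int ret;

	spin_lock_bh(&example_lock);
	ret = __example_post(val);
	spin_unlock_bh(&example_lock);

	return ret;
}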
@@ -588,7 +591,6 @@ static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
	unsigned int sw_index = src_ring->sw_index;
	struct ce_desc *sdesc, *sbase;
	unsigned int read_index;
-	int ret;

	if (src_ring->hw_index == sw_index) {
		/*
@@ -599,18 +601,12 @@ static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
		 * value of the HW index has become stale.
		 */

-		ret = ath10k_pci_wake(ar);
-		if (ret)
-			return ret;

		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;

-		ath10k_pci_sleep(ar);
	}

	read_index = src_ring->hw_index;
@@ -731,11 +727,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;
-	int ret;
-
-	ret = ath10k_pci_wake(ar);
-	if (ret)
-		return;

	spin_lock_bh(&ar_pci->ce_lock);

@@ -760,7 +751,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);
-	ath10k_pci_sleep(ar);
}

/*
@@ -771,13 +761,9 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)

void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
-	int ce_id, ret;
+	int ce_id;
	u32 intr_summary;

-	ret = ath10k_pci_wake(ar);
-	if (ret)
-		return;

	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
@@ -789,8 +775,6 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)

		ath10k_ce_per_engine_service(ar, ce_id);
	}

-	ath10k_pci_sleep(ar);
}

/*
@@ -800,16 +784,11 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
 *
 * Called with ce_lock held.
 */
-static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
-						int disable_copy_compl_intr)
+static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
-	int ret;
-
-	ret = ath10k_pci_wake(ar);
-	if (ret)
-		return;
+	bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
@@ -818,17 +797,11 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);

-	ath10k_pci_sleep(ar);
}

int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
-	int ce_id, ret;
-
-	ret = ath10k_pci_wake(ar);
-	if (ret)
-		return ret;
+	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -838,34 +811,16 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar)
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

-	ath10k_pci_sleep(ar);
-
	return 0;
}

-void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
-				void (*send_cb)(struct ath10k_ce_pipe *),
-				int disable_interrupts)
+void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
-	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ce_id;

-	spin_lock_bh(&ar_pci->ce_lock);
-	ce_state->send_cb = send_cb;
-	ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
-	spin_unlock_bh(&ar_pci->ce_lock);
-}
-
-void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
-				void (*recv_cb)(struct ath10k_ce_pipe *))
-{
-	struct ath10k *ar = ce_state->ar;
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-	spin_lock_bh(&ar_pci->ce_lock);
-	ce_state->recv_cb = recv_cb;
-	ath10k_ce_per_engine_handler_adjust(ce_state, 0);
-	spin_unlock_bh(&ar_pci->ce_lock);
+	for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+		ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
}
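
Note: with the *_cb_register() helpers gone, each pipe's interrupt mask is derived from its attr_flags (CE_ATTR_DIS_INTR), and the new ath10k_ce_enable_interrupts() applies that to every engine. A hypothetical caller might bracket a device reset like this (sketch only; the reset logic itself is elided):

static void example_reset_ce_irqs(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);

	/* ... reset or reconfigure the copy engines here ... */

	ath10k_ce_enable_interrupts(ar);
}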

static int ath10k_ce_init_src_ring(struct ath10k *ar,
@@ -898,7 +853,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

-	ath10k_dbg(ATH10K_DBG_BOOT,
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

@@ -932,7 +887,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

-	ath10k_dbg(ATH10K_DBG_BOOT,
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

@@ -1067,7 +1022,9 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
 * initialized by software/firmware.
 */
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
-			const struct ce_attr *attr)
+			const struct ce_attr *attr,
+			void (*send_cb)(struct ath10k_ce_pipe *),
+			void (*recv_cb)(struct ath10k_ce_pipe *))
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
@@ -1084,39 +1041,37 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
	BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

-	ret = ath10k_pci_wake(ar);
-	if (ret)
-		return ret;
-
+	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;
+	if (attr->src_nentries)
+		ce_state->send_cb = send_cb;
+	if (attr->dest_nentries)
+		ce_state->recv_cb = recv_cb;
+	spin_unlock_bh(&ar_pci->ce_lock);

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
			ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
-			goto out;
+			return ret;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
			ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
-			goto out;
+			return ret;
		}
	}

-out:
-	ath10k_pci_sleep(ar);
-	return ret;
+	return 0;
}
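
Note: completion callbacks are now supplied to ath10k_ce_init_pipe() and stored under ce_lock, and only for directions the pipe actually provisions ring entries for. A hypothetical caller (handler names invented for illustration):

static void example_tx_done_cb(struct ath10k_ce_pipe *pipe)
{
	/* reap completed send descriptors */
}

static void example_rx_done_cb(struct ath10k_ce_pipe *pipe)
{
	/* process filled rx buffers */
}

static int example_setup_pipe(struct ath10k *ar, unsigned int ce_id,
			      const struct ce_attr *attr)
{
	return ath10k_ce_init_pipe(ar, ce_id, attr,
				   example_tx_done_cb, example_rx_done_cb);
}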

static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
@@ -1140,16 +1095,8 @@ static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)

void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
-	int ret;
-
-	ret = ath10k_pci_wake(ar);
-	if (ret)
-		return;

	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);

-	ath10k_pci_sleep(ar);
}

int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
@@ -1163,7 +1110,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
		if (IS_ERR(ce_state->src_ring)) {
			ret = PTR_ERR(ce_state->src_ring);
			ath10k_err("failed to allocate copy engine source ring %d: %d\n",
			ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
				   ce_id, ret);
			ce_state->src_ring = NULL;
			return ret;
@@ -1175,7 +1122,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
								attr);
		if (IS_ERR(ce_state->dest_ring)) {
			ret = PTR_ERR(ce_state->dest_ring);
			ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
			ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
				   ce_id, ret);
			ce_state->dest_ring = NULL;
			return ret;
drivers/net/wireless/ath/ath10k/ce.h +7 −21
@@ -162,30 +162,13 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);

-void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
-				void (*send_cb)(struct ath10k_ce_pipe *),
-				int disable_interrupts);

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);

/*==================Recv=======================*/

-/*
- * Make a buffer available to receive. The buffer must be at least of a
- * minimal size appropriate for this copy engine (src_sz_max attribute).
- *   ce                    - which copy engine to use
- *   per_transfer_recv_context  - context passed back to caller's recv_cb
- *   buffer                     - address of buffer in CE space
- * Returns 0 on success; otherwise an error status.
- *
- * Implemenation note: Pushes a buffer to Dest ring.
- */
-int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
-			       void *per_transfer_recv_context,
-			       u32 buffer);
-
-void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
-				void (*recv_cb)(struct ath10k_ce_pipe *));
+int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
+int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);

/* recv flags */
/* Data is byte-swapped */
@@ -214,7 +197,9 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
/*==================CE Engine Initialization=======================*/

int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
-			const struct ce_attr *attr);
+			const struct ce_attr *attr,
+			void (*send_cb)(struct ath10k_ce_pipe *),
+			void (*recv_cb)(struct ath10k_ce_pipe *));
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			  const struct ce_attr *attr);
@@ -245,6 +230,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_disable_interrupts(struct ath10k *ar);
+void ath10k_ce_enable_interrupts(struct ath10k *ar);

/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
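
Note: the header now exposes the three rx-post entry points in place of ath10k_ce_recv_buf_enqueue(). A sketch of a replenish loop built on them (assumes ath10k_pci_priv() from the PCI layer; buffer allocation and DMA mapping are elided, and a real caller would map a fresh buffer per slot rather than reuse one paddr):

static void example_rx_replenish(struct ath10k_ce_pipe *pipe, u32 paddr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(pipe->ar);
	int n;

	spin_lock_bh(&ar_pci->ce_lock);
	for (n = __ath10k_ce_rx_num_free_bufs(pipe); n > 0; n--)
		if (__ath10k_ce_rx_post_buf(pipe, NULL, paddr))
			break;
	spin_unlock_bh(&ar_pci->ce_lock);
}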