
Commit 51c12756 authored by Kalle Valo
ath.git patches for 4.18. Major changes:

ath10k

* enable temperature reads for QCA6174 and QCA9377

* add firmware memory dump support for QCA9984

* continue adding WCN3990 support via SNOC bus
parents 37637700 4836ec42
+31 −0
@@ -4,6 +4,7 @@ Required properties:
- compatible: Should be one of the following:
	* "qcom,ath10k"
	* "qcom,ipq4019-wifi"
	* "qcom,wcn3990-wifi"

PCI based devices uses compatible string "qcom,ath10k" and takes calibration
data along with board specific data via "qcom,ath10k-calibration-data".
@@ -18,8 +19,12 @@ In general, entry "qcom,ath10k-pre-calibration-data" and
"qcom,ath10k-calibration-data" conflict with each other and only one
can be provided per device.

SNOC based devices (i.e. wcn3990) uses compatible string "qcom,wcn3990-wifi".

Optional properties:
- reg: Address and length of the register set for the device.
- reg-names: Must include the list of following reg names,
	     "membase"
- resets: Must contain an entry for each entry in reset-names.
          See ../reset/reseti.txt for details.
- reset-names: Must include the list of following reset names,
@@ -49,6 +54,8 @@ Optional properties:
				 hw versions.
- qcom,ath10k-pre-calibration-data : pre calibration data as an array,
				     the length can vary between hw versions.
- <supply-name>-supply: handle to the regulator device tree node
			   optional "supply-name" is "vdd-0.8-cx-mx".

Example (to supply the calibration data alone):

@@ -119,3 +126,27 @@ wifi0: wifi@a000000 {
	qcom,msi_base = <0x40>;
	qcom,ath10k-pre-calibration-data = [ 01 02 03 ... ];
};

Example (to supply wcn3990 SoC wifi block details):

wifi@18000000 {
		compatible = "qcom,wcn3990-wifi";
		reg = <0x18800000 0x800000>;
		reg-names = "membase";
		clocks = <&clock_gcc clk_aggre2_noc_clk>;
		clock-names = "smmu_aggre2_noc_clk"
		interrupts =
			   <0 130 0 /* CE0 */ >,
			   <0 131 0 /* CE1 */ >,
			   <0 132 0 /* CE2 */ >,
			   <0 133 0 /* CE3 */ >,
			   <0 134 0 /* CE4 */ >,
			   <0 135 0 /* CE5 */ >,
			   <0 136 0 /* CE6 */ >,
			   <0 137 0 /* CE7 */ >,
			   <0 138 0 /* CE8 */ >,
			   <0 139 0 /* CE9 */ >,
			   <0 140 0 /* CE10 */ >,
			   <0 141 0 /* CE11 */ >;
		vdd-0.8-cx-mx-supply = <&pm8998_l5>;
};
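
On the driver side, the new compatible string is what the SNOC transport matches against. A minimal sketch of that binding, assuming the standard platform-driver pattern (only "qcom,wcn3990-wifi" comes from this document; the table name is made up for illustration):

#include <linux/module.h>
#include <linux/of.h>

/* Illustrative sketch only, not the driver's actual match table. */
static const struct of_device_id wcn3990_of_match[] = {
	{ .compatible = "qcom,wcn3990-wifi" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, wcn3990_of_match);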
+12 −0
@@ -4,12 +4,16 @@ config ATH10K
	select ATH_COMMON
	select CRC32
	select WANT_DEV_COREDUMP
	select ATH10K_CE
        ---help---
          This module adds support for wireless adapters based on
          Atheros IEEE 802.11ac family of chipsets.

          If you choose to build a module, it'll be called ath10k.

config ATH10K_CE
	bool

config ATH10K_PCI
	tristate "Atheros ath10k PCI support"
	depends on ATH10K && PCI
@@ -36,6 +40,14 @@ config ATH10K_USB
	  This module adds experimental support for USB bus. Currently
	  work in progress and will not fully work.

config ATH10K_SNOC
        tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
        depends on ATH10K && ARCH_QCOM
        ---help---
          This module adds support for integrated WCN3990 chip connected
          to system NOC(SNOC). Currently work in progress and will not
          fully work.

config ATH10K_DEBUG
	bool "Atheros ath10k debugging"
	depends on ATH10K
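
The new ATH10K_SNOC entry, together with the hidden ATH10K_CE symbol selected by ATH10K, lets the copy-engine code be shared between bus modules. As a hedged sketch, enabling the SNOC transport on an ARCH_QCOM kernel would amount to a configuration fragment along these lines (only the ATH10K* symbols come from this hunk; everything else about the kernel config is assumed):

CONFIG_ATH10K=m
CONFIG_ATH10K_SNOC=m
# CONFIG_ATH10K_CE=y follows automatically via the new "select ATH10K_CE"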
+5 −2
@@ -22,10 +22,10 @@ ath10k_core-$(CONFIG_THERMAL) += thermal.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
ath10k_core-$(CONFIG_PM) += wow.o
ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
ath10k_core-$(CONFIG_ATH10K_CE) += ce.o

obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
		ce.o
ath10k_pci-y += pci.o

ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o

@@ -35,5 +35,8 @@ ath10k_sdio-y += sdio.o
obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o
ath10k_usb-y += usb.o

obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o
ath10k_snoc-y += snoc.o

# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
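
For orientation (a summary, not part of the patch): with these Makefile changes, ce.o is no longer linked into ath10k_pci but into ath10k_core whenever CONFIG_ATH10K_CE is set, and a new ath10k_snoc module built from snoc.o carries the SNOC transport.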
+260 −9
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
@@ -58,6 +59,74 @@
 * the buffer is sent/received.
 */

static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
					struct ath10k_ce_pipe *ce_state)
{
	u32 ce_id = ce_state->id;
	u32 addr = 0;

	switch (ce_id) {
	case 0:
		addr = 0x00032000;
		break;
	case 3:
		addr = 0x0003200C;
		break;
	case 4:
		addr = 0x00032010;
		break;
	case 5:
		addr = 0x00032014;
		break;
	case 7:
		addr = 0x0003201C;
		break;
	default:
		ath10k_warn(ar, "invalid CE id: %d", ce_id);
		break;
	}
	return addr;
}

static inline u32 shadow_dst_wr_ind_addr(struct ath10k *ar,
					 struct ath10k_ce_pipe *ce_state)
{
	u32 ce_id = ce_state->id;
	u32 addr = 0;

	switch (ce_id) {
	case 1:
		addr = 0x00032034;
		break;
	case 2:
		addr = 0x00032038;
		break;
	case 5:
		addr = 0x00032044;
		break;
	case 7:
		addr = 0x0003204C;
		break;
	case 8:
		addr = 0x00032050;
		break;
	case 9:
		addr = 0x00032054;
		break;
	case 10:
		addr = 0x00032058;
		break;
	case 11:
		addr = 0x0003205C;
		break;
	default:
		ath10k_warn(ar, "invalid CE id: %d", ce_id);
		break;
	}

	return addr;
}

static inline unsigned int
ath10k_set_ring_byte(unsigned int offset,
		     struct ath10k_hw_ce_regs_addr_map *addr_map)
@@ -116,11 +185,46 @@ static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
				ar->hw_ce_regs->sr_wr_index_addr);
}

static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar,
							 u32 ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK;
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	u32 index;

	if (ar->hw_params.rri_on_ddr &&
	    (ce_state->attr_flags & CE_ATTR_DIS_INTR))
		index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id);
	else
		index = ath10k_ce_read32(ar, ce_ctrl_addr +
					 ar->hw_ce_regs->current_srri_addr);

	return index;
}

static inline void
ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
					  struct ath10k_ce_pipe *ce_state,
					  unsigned int value)
{
	ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
}

static inline void
ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar,
					   struct ath10k_ce_pipe *ce_state,
					   unsigned int value)
{
	ath10k_ce_write32(ar, shadow_dst_wr_ind_addr(ar, ce_state), value);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
@@ -181,11 +285,31 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
			  ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
}

static inline
	u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) &
		CE_DDR_RRI_MASK;
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	u32 index;

	if (ar->hw_params.rri_on_ddr &&
	    (ce_state->attr_flags & CE_ATTR_DIS_INTR))
		index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id);
	else
		index = ath10k_ce_read32(ar, ce_ctrl_addr +
					 ar->hw_ce_regs->current_drri_addr);

	return index;
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
@@ -376,8 +500,14 @@ static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* WORKAROUND */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
	if (!(flags & CE_SEND_FLAG_GATHER)) {
		if (ar->hw_params.shadow_reg_support)
			ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
								  write_index);
		else
			ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
							   write_index);
	}

	src_ring->write_index = write_index;
exit:
@@ -395,7 +525,7 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc_64 *desc, sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	__le32 *addr;
@@ -409,6 +539,11 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
		ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	if (ar->hw_params.rri_on_ddr)
		sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id);
	else
		sw_index = src_ring->sw_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
@@ -464,6 +599,7 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
	return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
}
EXPORT_SYMBOL(ath10k_ce_send_nolock);

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
@@ -491,6 +627,7 @@ void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)

	src_ring->per_transfer_context[src_ring->write_index] = NULL;
}
EXPORT_SYMBOL(__ath10k_ce_send_revert);

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
@@ -510,6 +647,7 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_send);

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
@@ -525,6 +663,7 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)

	return delta;
}
EXPORT_SYMBOL(ath10k_ce_num_free_src_entries);

int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
@@ -539,6 +678,7 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs);

static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
				   dma_addr_t paddr)
@@ -615,13 +755,14 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
	/* Prevent CE ring stuck issue that will occur when ring is full.
	 * Make sure that write index is 1 less than read index.
	 */
	if ((cur_write_idx + nentries)  == dest_ring->sw_index)
	if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index)
		nentries -= 1;

	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}
EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx);

int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
			  dma_addr_t paddr)
@@ -636,6 +777,7 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_rx_post_buf);

/*
 * Guts of ath10k_ce_completed_recv_next.
@@ -748,6 +890,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
							    per_transfer_ctx,
							    nbytesp);
}
EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock);

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
@@ -766,6 +909,7 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_completed_recv_next);

static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
				       void **per_transfer_contextp,
@@ -882,6 +1026,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
						  per_transfer_contextp,
						  bufferp);
}
EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);

/*
 * Guts of ath10k_ce_completed_send_next.
@@ -915,6 +1060,9 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
		src_ring->hw_index = read_index;
	}

	if (ar->hw_params.rri_on_ddr)
		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	else
		read_index = src_ring->hw_index;

	if (read_index == sw_index)
@@ -936,6 +1084,7 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,

	return 0;
}
EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);

static void ath10k_ce_extract_desc_data(struct ath10k *ar,
					struct ath10k_ce_ring *src_ring,
@@ -1025,6 +1174,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_cancel_send_next);

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp)
@@ -1040,6 +1190,7 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_completed_send_next);

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
@@ -1078,6 +1229,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)

	spin_unlock_bh(&ce->ce_lock);
}
EXPORT_SYMBOL(ath10k_ce_per_engine_service);

/*
 * Handler for per-engine interrupts on ALL active CEs.
@@ -1102,6 +1254,7 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
		ath10k_ce_per_engine_service(ar, ce_id);
	}
}
EXPORT_SYMBOL(ath10k_ce_per_engine_service_any);

/*
 * Adjust interrupts for the copy complete handler.
@@ -1139,6 +1292,7 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar)

	return 0;
}
EXPORT_SYMBOL(ath10k_ce_disable_interrupts);

void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
@@ -1154,6 +1308,7 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar)
		ath10k_ce_per_engine_handler_adjust(ce_state);
	}
}
EXPORT_SYMBOL(ath10k_ce_enable_interrupts);

static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
@@ -1234,6 +1389,22 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
	return 0;
}

static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
				       struct ath10k_ce_ring *src_ring,
				       u32 nentries)
{
	src_ring->shadow_base_unaligned = kcalloc(nentries,
						  sizeof(struct ce_desc),
						  GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned)
		return -ENOMEM;

	src_ring->shadow_base = (struct ce_desc *)
			PTR_ALIGN(src_ring->shadow_base_unaligned,
				  CE_DESC_RING_ALIGN);
	return 0;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
			 const struct ce_attr *attr)
@@ -1241,6 +1412,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;
	int ret;

	nentries = roundup_pow_of_two(nentries);

@@ -1277,6 +1449,19 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
			ALIGN(src_ring->base_addr_ce_space_unaligned,
			      CE_DESC_RING_ALIGN);

	if (ar->hw_params.shadow_reg_support) {
		ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
		if (ret) {
			dma_free_coherent(ar->dev,
					  (nentries * sizeof(struct ce_desc) +
					   CE_DESC_RING_ALIGN),
					  src_ring->base_addr_owner_space_unaligned,
					  base_addr);
			kfree(src_ring);
			return ERR_PTR(ret);
		}
	}

	return src_ring;
}

@@ -1287,6 +1472,7 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;
	int ret;

	nentries = roundup_pow_of_two(nentries);

@@ -1322,6 +1508,19 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
			ALIGN(src_ring->base_addr_ce_space_unaligned,
			      CE_DESC_RING_ALIGN);

	if (ar->hw_params.shadow_reg_support) {
		ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
		if (ret) {
			dma_free_coherent(ar->dev,
					  (nentries * sizeof(struct ce_desc) +
					   CE_DESC_RING_ALIGN),
					  src_ring->base_addr_owner_space_unaligned,
					  base_addr);
			kfree(src_ring);
			return ERR_PTR(ret);
		}
	}

	return src_ring;
}

@@ -1454,6 +1653,7 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,

	return 0;
}
EXPORT_SYMBOL(ath10k_ce_init_pipe);

static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
@@ -1479,6 +1679,7 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_deinit_pipe);

static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
@@ -1486,6 +1687,8 @@ static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

	if (ce_state->src_ring) {
		if (ar->hw_params.shadow_reg_support)
			kfree(ce_state->src_ring->shadow_base_unaligned);
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc) +
@@ -1515,6 +1718,8 @@ static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

	if (ce_state->src_ring) {
		if (ar->hw_params.shadow_reg_support)
			kfree(ce_state->src_ring->shadow_base_unaligned);
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc_64) +
@@ -1545,6 +1750,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)

	ce_state->ops->ce_free_pipe(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_free_pipe);

void ath10k_ce_dump_registers(struct ath10k *ar,
			      struct ath10k_fw_crash_data *crash_data)
@@ -1584,6 +1790,7 @@ void ath10k_ce_dump_registers(struct ath10k *ar,

	spin_unlock_bh(&ce->ce_lock);
}
EXPORT_SYMBOL(ath10k_ce_dump_registers);

static const struct ath10k_ce_ops ce_ops = {
	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
@@ -1680,3 +1887,47 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,

	return 0;
}
EXPORT_SYMBOL(ath10k_ce_alloc_pipe);

void ath10k_ce_alloc_rri(struct ath10k *ar)
{
	int i;
	u32 value;
	u32 ctrl1_regs;
	u32 ce_base_addr;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	ce->vaddr_rri = dma_alloc_coherent(ar->dev,
					   (CE_COUNT * sizeof(u32)),
					   &ce->paddr_rri, GFP_KERNEL);

	if (!ce->vaddr_rri)
		return;

	ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low,
			  lower_32_bits(ce->paddr_rri));
	ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
			  (upper_32_bits(ce->paddr_rri) &
			  CE_DESC_FLAGS_GET_MASK));

	for (i = 0; i < CE_COUNT; i++) {
		ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
		ce_base_addr = ath10k_ce_base_address(ar, i);
		value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs);
		value |= ar->hw_ce_regs->upd->mask;
		ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
	}

	memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32));
}
EXPORT_SYMBOL(ath10k_ce_alloc_rri);

void ath10k_ce_free_rri(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)),
			  ce->vaddr_rri,
			  ce->paddr_rri);
}
EXPORT_SYMBOL(ath10k_ce_free_rri);
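
The EXPORT_SYMBOLs and the RRI allocator above exist so that a separately built bus module (such as the SNOC transport added in this series) can drive the copy engines. A rough usage sketch, assuming hw_params.rri_on_ddr is set for the target; example_bus_start() and host_ce_config[] are made-up names, and error unwinding and locking are omitted:

/* Hedged sketch only: ath10k_ce_alloc_rri(), ath10k_ce_init_pipe() and
 * ath10k_ce_enable_interrupts() are symbols exported above; the rest is
 * illustrative scaffolding, not this patch's code.
 */
static int example_bus_start(struct ath10k *ar)
{
	int ce_id, ret;

	if (ar->hw_params.rri_on_ddr)
		ath10k_ce_alloc_rri(ar);

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		ret = ath10k_ce_init_pipe(ar, ce_id, &host_ce_config[ce_id]);
		if (ret) {
			ath10k_warn(ar, "failed to init CE pipe %d: %d\n",
				    ce_id, ret);
			return ret;
		}
	}

	ath10k_ce_enable_interrupts(ar);
	return 0;
}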
+21 −3
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
@@ -48,6 +49,9 @@ struct ath10k_ce_pipe;
#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
#define CE_DESC_FLAGS_META_DATA_LSB  ar->hw_values->ce_desc_meta_data_lsb

#define CE_DDR_RRI_MASK			GENMASK(15, 0)
#define CE_DDR_DRRI_SHIFT		16

struct ce_desc {
	__le32 addr;
	__le16 nbytes;
@@ -113,6 +117,9 @@ struct ath10k_ce_ring {
	/* CE address space */
	u32 base_addr_ce_space;

	char *shadow_base_unaligned;
	struct ce_desc *shadow_base;

	/* keep last */
	void *per_transfer_context[0];
};
@@ -153,6 +160,8 @@ struct ath10k_ce {
	spinlock_t ce_lock;
	const struct ath10k_bus_ops *bus_ops;
	struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
	u32 *vaddr_rri;
	dma_addr_t paddr_rri;
};

/*==================Send====================*/
@@ -261,6 +270,8 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar);
void ath10k_ce_enable_interrupts(struct ath10k *ar);
void ath10k_ce_dump_registers(struct ath10k *ar,
			      struct ath10k_fw_crash_data *crash_data);
void ath10k_ce_alloc_rri(struct ath10k *ar);
void ath10k_ce_free_rri(struct ath10k *ar);

/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
@@ -327,6 +338,9 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
	return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
}

#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) (((COPY_ENGINE_BASE_ADDRESS) \
		- CE0_BASE_ADDRESS) / (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS))

#define CE_SRC_RING_TO_DESC(baddr, idx) \
	(&(((struct ce_desc *)baddr)[idx]))

@@ -355,14 +369,18 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
	(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
		CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS			0x0000
#define CE_INTERRUPT_SUMMARY		(GENMASK(CE_COUNT_MAX - 1, 0))

static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	if (!ar->hw_params.per_ce_irq)
		return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(
			ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS +
			CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
	else
		return CE_INTERRUPT_SUMMARY;
}

#endif /* _CE_H_ */
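
As a reading aid for the two masks introduced above: the per-engine read index word that ath10k_ce_alloc_rri() places in DDR packs both ring indices into one 32-bit value. A hedged illustration (the masks and shift are from this header; the variable names are made up), mirroring ath10k_ce_src_ring_read_index_from_ddr() and its dest counterpart in ce.c:

u32 rri = ce->vaddr_rri[ce_id];
u32 src_read_index  = rri & CE_DDR_RRI_MASK;                         /* bits 15:0  */
u32 dest_read_index = (rri >> CE_DDR_DRRI_SHIFT) & CE_DDR_RRI_MASK;  /* bits 31:16 */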