Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0f39dc7f authored by Jinesh K. Jayakumar
Browse files

msm: ipa: Add support for memory allocator in offload sub-system



Add support for offload and network drivers to use custom memory
allocators for allocating channel and descriptor memory.

Change-Id: I764d96fa80222e6bae14a30240fc45d60b2691a1
Signed-off-by: Jinesh K. Jayakumar <jineshk@codeaurora.org>
parent b741b1ab
Loading
Loading
Loading
Loading
+174 −47
Original line number Diff line number Diff line
@@ -14,28 +14,99 @@

#include <linux/gfp.h>
#include <linux/slab.h>

#define IPA_ETH_NET_DRIVER
#include <linux/ipa_eth.h>

#include "atl_fwd.h"
#include "atl_qcom_ipa.h"

#define ATL_IPA_DEFAULT_RING_SZ 128
#define ATL_IPA_DEFAULT_BUFF_SZ 2048

/* Fetch the atl_fwd ring stashed in the channel's network-driver private field. */
static inline struct atl_fwd_ring *CH_RING(struct ipa_eth_channel *ch)
{
	return (struct atl_fwd_ring *)ch->nd_priv;
}

/*
 * atl_ipa_dma_alloc() - Allocate DMA memory via a custom offload allocator.
 * @dev: atl network device whose drvdata holds the atl_nic
 * @size: number of bytes to allocate
 * @daddr: optional out-param for the DMA address of the allocation
 * @gfp: allocation flags passed through to the allocator
 * @dma_allocator: offload sub-system allocator to use
 *
 * Returns the kernel virtual address of the allocation, or NULL on failure.
 */
static void *atl_ipa_dma_alloc(struct device *dev, size_t size,
			       dma_addr_t *daddr, gfp_t gfp,
			       struct ipa_eth_dma_allocator *dma_allocator)
{
	struct atl_nic *nic = dev_get_drvdata(dev);
	struct ipa_eth_device *eth_dev = nic->fwd.private;
	struct ipa_eth_resource res;

	if (dma_allocator->alloc(eth_dev, size, gfp, &res) != 0)
		return NULL;

	if (daddr)
		*daddr = res.daddr;

	return res.vaddr;
}

/*
 * atl_ipa_dma_free() - Return DMA memory to the custom offload allocator.
 * @buf: kernel virtual address of the allocation
 * @dev: atl network device whose drvdata holds the atl_nic
 * @size: size of the original allocation
 * @daddr: DMA address of the original allocation
 * @dma_allocator: allocator that provided the memory
 *
 * Fix: the original used `return dma_allocator->free(...);` — a return
 * statement with an expression is not permitted in a function returning
 * void (C11 6.8.6.4); call the hook as a plain statement instead.
 */
static void atl_ipa_dma_free(void *buf, struct device *dev, size_t size,
			     dma_addr_t daddr,
			     struct ipa_eth_dma_allocator *dma_allocator)
{
	struct atl_nic *nic = (struct atl_nic *)dev_get_drvdata(dev);
	struct ipa_eth_device *eth_dev = nic->fwd.private;
	struct ipa_eth_resource mem = {
		.size = size,
		.vaddr = buf,
		.daddr = daddr,
	};

	dma_allocator->free(eth_dev, &mem);
}

/* atl_fwd mem_ops hook: allocate descriptor memory for the ring. */
static void *atl_ipa_alloc_descs(struct device *dev, size_t size,
				 dma_addr_t *daddr, gfp_t gfp,
				 struct atl_fwd_mem_ops *ops)
{
	struct ipa_eth_channel *ch = ops->private;
	struct ipa_eth_dma_allocator *allocator =
		ch->mem_params.desc.allocator;

	return atl_ipa_dma_alloc(dev, size, daddr, gfp, allocator);
}

/* atl_fwd mem_ops hook: allocate packet buffer memory for the ring. */
static void *atl_ipa_alloc_buf(struct device *dev, size_t size,
			       dma_addr_t *daddr, gfp_t gfp,
			       struct atl_fwd_mem_ops *ops)
{
	struct ipa_eth_channel *ch = ops->private;
	struct ipa_eth_dma_allocator *allocator =
		ch->mem_params.buff.allocator;

	return atl_ipa_dma_alloc(dev, size, daddr, gfp, allocator);
}

/*
 * atl_fwd mem_ops hook: free descriptor memory via the descriptor allocator.
 *
 * Fix: dropped the `return` keyword — returning a void expression from a
 * void function violates C11 6.8.6.4.
 */
static void atl_ipa_free_descs(void *buf, struct device *dev, size_t size,
			       dma_addr_t daddr, struct atl_fwd_mem_ops *ops)
{
	struct ipa_eth_channel *ch = ops->private;

	atl_ipa_dma_free(buf, dev, size, daddr,
			ch->mem_params.desc.allocator);
}

/*
 * atl_fwd mem_ops hook: free packet buffer memory.
 *
 * Fix: the original freed through mem_params.desc.allocator (copy-paste
 * from atl_ipa_free_descs). Buffer memory is allocated by
 * atl_ipa_alloc_buf() through mem_params.buff.allocator, so it must be
 * returned to that same allocator. Also dropped the invalid
 * `return <void expr>;` (C11 6.8.6.4).
 */
static void atl_ipa_free_buf(void *buf, struct device *dev, size_t size,
			     dma_addr_t daddr, struct atl_fwd_mem_ops *ops)
{
	struct ipa_eth_channel *ch = ops->private;

	atl_ipa_dma_free(buf, dev, size, daddr,
			ch->mem_params.buff.allocator);
}

static int atl_ipa_open_device(struct ipa_eth_device *eth_dev)
{
	struct atl_nic *nic = (struct atl_nic *)dev_get_drvdata(eth_dev->dev);

	if (!nic || !nic->ndev) {
		dev_err(eth_dev->dev, "Invalid atl_nic");
		dev_err(eth_dev->dev, "Invalid atl_nic\n");
		return -ENODEV;
	}

	nic->fwd.private = eth_dev;

	/* atl specific init, ref counting go here */

	eth_dev->nd_priv = nic;
@@ -46,17 +117,48 @@ static int atl_ipa_open_device(struct ipa_eth_device *eth_dev)

/* Undo atl_ipa_open_device(): sever the eth_dev <-> atl_nic cross links. */
static void atl_ipa_close_device(struct ipa_eth_device *eth_dev)
{
	struct atl_nic *nic = eth_dev->nd_priv;

	eth_dev->net_dev = NULL;
	eth_dev->nd_priv = NULL;

	nic->fwd.private = NULL;
}

static struct ipa_eth_channel *atl_ipa_request_channel(
	struct ipa_eth_device *eth_dev, enum ipa_eth_channel_dir dir,
	unsigned long features, unsigned long events)
	unsigned long events, unsigned long features,
	const struct ipa_eth_channel_mem_params *mem_params)
{
	struct atl_fwd_ring *ring = NULL;
	enum atl_fwd_ring_flags ring_flags = 0;
	struct ipa_eth_channel *channel = NULL;
	struct atl_fwd_mem_ops *mem_ops = NULL;
	struct ipa_eth_channel_mem *desc_mem = NULL;
	struct ipa_eth_channel_mem *buff_mem = NULL;
	size_t desc_count;
	size_t buff_size;

	channel =
		ipa_eth_net_alloc_channel(eth_dev, dir,
					  events, features, mem_params);
	if (!channel) {
		dev_err(eth_dev->dev, "Failed to alloc ipa eth channel\n");
		goto err_channel;
	}

	desc_count = channel->mem_params.desc.count;
	buff_size = channel->mem_params.buff.size;

	mem_ops = kzalloc(sizeof(*mem_ops), GFP_KERNEL);
	if (!mem_ops)
		goto err_mem_ops;

	mem_ops->alloc_descs = atl_ipa_alloc_descs;
	mem_ops->alloc_buf = atl_ipa_alloc_buf;
	mem_ops->free_descs = atl_ipa_free_descs;
	mem_ops->free_buf = atl_ipa_free_buf;
	mem_ops->private = channel;

	switch (dir) {
	case IPA_ETH_DIR_RX:
@@ -65,67 +167,92 @@ static struct ipa_eth_channel *atl_ipa_request_channel(
		ring_flags |= ATL_FWR_TX;
		break;
	default:
		dev_err(eth_dev->dev, "Unsupported direction %d", dir);
		return NULL;
		dev_err(eth_dev->dev, "Unsupported direction %d\n", dir);
		goto err_dir;
	}

	ring_flags |= ATL_FWR_ALLOC_BUFS;
	ring_flags |= ATL_FWR_CONTIG_BUFS;

	ring = atl_fwd_request_ring(eth_dev->net_dev, ring_flags,
				    ATL_IPA_DEFAULT_RING_SZ,
				    ATL_IPA_DEFAULT_BUFF_SZ, 1, NULL);
				    desc_count, buff_size, 1, mem_ops);
	if (IS_ERR_OR_NULL(ring)) {
		dev_err(eth_dev->dev, "Request ring failed");
		goto err_exit;
		dev_err(eth_dev->dev, "Request ring failed\n");
		goto err_ring;
	}

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		goto err_exit;

	channel->events = 0;
	channel->features = 0;
	channel->direction = dir;
	channel->nd_priv = ring;
	channel->queue = ring->idx;

	channel->desc_size = 16;
	channel->desc_count = ring->hw.size;
	channel->desc_mem.size = channel->desc_size * channel->desc_count;
	desc_mem = kzalloc(sizeof(*desc_mem), GFP_KERNEL);
	if (!desc_mem)
		goto err_desc_mem;

	channel->desc_mem.vaddr = ring->hw.descs;
	channel->desc_mem.daddr = ring->hw.daddr;
	channel->desc_mem.paddr =
		page_to_phys(vmalloc_to_page(channel->desc_mem.vaddr));
	channel->mem_params.desc.size = 16;
	channel->mem_params.desc.count = ring->hw.size;

	channel->buff_size = ATL_IPA_DEFAULT_BUFF_SZ;
	channel->buff_count = channel->desc_count;
	channel->buff_mem.size = channel->buff_size * channel->buff_count;
	desc_mem->mem.size =
		channel->mem_params.desc.size * channel->mem_params.desc.count;
	desc_mem->mem.vaddr = ring->hw.descs;
	desc_mem->mem.daddr = ring->hw.daddr;
	desc_mem->mem.paddr = channel->mem_params.desc.allocator->paddr(
				eth_dev, desc_mem->mem.vaddr);

	channel->buff_mem.vaddr = (void *)ring->bufs->vaddr_vec;
	channel->buff_mem.daddr = ring->bufs->daddr_vec_base;
	channel->buff_mem.paddr = virt_to_phys((void *)ring->bufs->vaddr_vec);
	buff_mem = kzalloc(sizeof(*buff_mem), GFP_KERNEL);
	if (!buff_mem)
		goto err_buff_mem;

	channel->eth_dev = eth_dev;
	channel->nd_priv = ring;
	channel->mem_params.buff.size = buff_size;
	channel->mem_params.buff.count = channel->mem_params.desc.count;

	return channel;
	buff_mem->mem.size =
		channel->mem_params.buff.size * channel->mem_params.buff.count;
	buff_mem->mem.vaddr = (void *)ring->bufs->vaddr_vec;
	buff_mem->mem.daddr = ring->bufs->daddr_vec_base;
	buff_mem->mem.paddr = channel->mem_params.buff.allocator->paddr(
				eth_dev, buff_mem->mem.vaddr);

err_exit:
	kzfree(channel);
	list_add(&desc_mem->mem_list_entry, &channel->desc_mem);
	list_add(&buff_mem->mem_list_entry, &channel->buff_mem);

	if (!IS_ERR_OR_NULL(ring)) {
		atl_fwd_release_ring(ring);
		ring = NULL;
	}
	return channel;

err_buff_mem:
	kzfree(desc_mem);
err_desc_mem:
	atl_fwd_release_ring(ring);
err_ring:
err_dir:
	if (mem_ops)
		kzfree(mem_ops);
err_mem_ops:
	ipa_eth_net_free_channel(channel);
err_channel:
	return NULL;
}

/*
 * atl_ipa_release_channel() - Tear down a channel created by
 * atl_ipa_request_channel().
 *
 * Fix: removed stale leftover statements at the top of the body
 * (`atl_fwd_release_ring(CH_RING(ch)); kzfree(ch);`) that released the
 * ring twice and freed @ch before it was dereferenced below
 * (use-after-free). Also dropped the redundant `if (mem_ops)` guard —
 * kzfree(NULL) is a no-op.
 *
 * Release order: ring first (hardware side), then the mem_ops wrapper,
 * then the desc/buff bookkeeping entries, and finally the channel itself.
 */
static void atl_ipa_release_channel(struct ipa_eth_channel *ch)
{
	struct ipa_eth_channel_mem *mem, *tmp;
	struct atl_fwd_ring *ring = CH_RING(ch);
	struct atl_fwd_mem_ops *mem_ops = ring->mem_ops;

	atl_fwd_release_ring(ring);

	kzfree(mem_ops);

	list_for_each_entry_safe(mem, tmp, &ch->desc_mem, mem_list_entry) {
		list_del(&mem->mem_list_entry);
		kzfree(mem);
	}

	list_for_each_entry_safe(mem, tmp, &ch->buff_mem, mem_list_entry) {
		list_del(&mem->mem_list_entry);
		kzfree(mem);
	}

	ipa_eth_net_free_channel(ch);
}

static int atl_ipa_enable_channel(struct ipa_eth_channel *ch)
@@ -153,7 +280,7 @@ static int atl_ipa_request_event(struct ipa_eth_channel *ch,
	case IPA_ETH_DEV_EV_RX_INT:
		if (ch->direction != IPA_ETH_DIR_RX) {
			dev_err(eth_dev->dev,
				"Rx interrupt requested on incorrect channel");
				"Rx interrupt requested on tx channel\n");
			return -EFAULT;
		}

@@ -166,7 +293,7 @@ static int atl_ipa_request_event(struct ipa_eth_channel *ch,
	case IPA_ETH_DEV_EV_TX_INT:
		if (ch->direction != IPA_ETH_DIR_TX) {
			dev_err(eth_dev->dev,
				"Tx interrupt requested on incorrect channel");
				"Tx interrupt requested on rx channel\n");
			return -EFAULT;
		}

@@ -179,7 +306,7 @@ static int atl_ipa_request_event(struct ipa_eth_channel *ch,
	case IPA_ETH_DEV_EV_TX_PTR:
		if (ch->direction != IPA_ETH_DIR_TX) {
			dev_err(eth_dev->dev,
				"Tx ptr wrb requested on incorrect channel");
				"Tx ptr wrb requested on rx channel\n");
			return -EFAULT;
		}

@@ -190,7 +317,7 @@ static int atl_ipa_request_event(struct ipa_eth_channel *ch,
		break;

	default:
		dev_err(eth_dev->dev, "Unsupported event requested");
		dev_err(eth_dev->dev, "Unsupported event requested\n");
		return -ENODEV;
	}

@@ -226,7 +353,7 @@ static void atl_ipa_release_event(struct ipa_eth_channel *ch,
		break;

	default:
		dev_err(eth_dev->dev, "Unsupported event for release");
		dev_err(eth_dev->dev, "Unsupported event for release\n");
		return;
	}

+1 −0
Original line number Diff line number Diff line
@@ -6,6 +6,7 @@ ipa-eth-y := \
	ipa_eth.o \
	ipa_eth_ep.o \
	ipa_eth_gsi.o \
	ipa_eth_net.o \
	ipa_eth_offload.o \
	ipa_eth_pci.o \
	ipa_eth_pm.o \
+29 −15
Original line number Diff line number Diff line
@@ -244,7 +244,9 @@ static void __ipa_eth_refresh_device(struct work_struct *work)

	if (initable(eth_dev)) {
		if (eth_dev->of_state == IPA_ETH_OF_ST_DEINITED) {
			IPA_ACTIVE_CLIENTS_INC_SIMPLE();
			(void) ipa_eth_init_device(eth_dev);
			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

			if (eth_dev->of_state != IPA_ETH_OF_ST_INITED) {
				ipa_eth_dev_err(eth_dev,
@@ -255,7 +257,9 @@ static void __ipa_eth_refresh_device(struct work_struct *work)
	}

	if (startable(eth_dev)) {
		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
		(void) ipa_eth_start_device(eth_dev);
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

		if (eth_dev->of_state != IPA_ETH_OF_ST_STARTED) {
			ipa_eth_dev_err(eth_dev, "Failed to start device");
@@ -269,7 +273,9 @@ static void __ipa_eth_refresh_device(struct work_struct *work)
		ipa_eth_dev_log(eth_dev, "Start is disallowed for the device");

		if (eth_dev->of_state == IPA_ETH_OF_ST_STARTED) {
			IPA_ACTIVE_CLIENTS_INC_SIMPLE();
			ipa_eth_stop_device(eth_dev);
			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

			if (eth_dev->of_state != IPA_ETH_OF_ST_INITED) {
				ipa_eth_dev_err(eth_dev,
@@ -282,7 +288,9 @@ static void __ipa_eth_refresh_device(struct work_struct *work)
	if (!initable(eth_dev)) {
		ipa_eth_dev_log(eth_dev, "Init is disallowed for the device");

		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
		ipa_eth_deinit_device(eth_dev);
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

		if (eth_dev->of_state != IPA_ETH_OF_ST_DEINITED) {
			ipa_eth_dev_err(eth_dev, "Failed to deinit device");
@@ -382,18 +390,6 @@ static void ipa_eth_ipa_ready_cb(void *data)
	ipa_eth_refresh_devices();
}

struct ipa_eth_device *ipa_eth_find_device(struct device *dev)
{
	struct ipa_eth_device *eth_dev;

	list_for_each_entry(eth_dev, &ipa_eth_devices, device_list) {
		if (eth_dev->dev == dev)
			return eth_dev;
	}

	return NULL;
}

static ssize_t ipa_eth_dev_write_init(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
@@ -626,6 +622,8 @@ static void ipa_eth_unpair_devices(struct ipa_eth_offload_driver *od)

int ipa_eth_register_device(struct ipa_eth_device *eth_dev)
{
	int rc;

	if (!eth_dev->dev) {
		ipa_eth_dev_err(eth_dev, "Device is NULL");
		return -EINVAL;
@@ -640,8 +638,23 @@ int ipa_eth_register_device(struct ipa_eth_device *eth_dev)
	eth_dev->pm_handle = IPA_PM_MAX_CLIENTS;
	INIT_WORK(&eth_dev->refresh, __ipa_eth_refresh_device);

	INIT_LIST_HEAD(&eth_dev->rx_channels);
	INIT_LIST_HEAD(&eth_dev->tx_channels);

	eth_dev->init = eth_dev->start = !ipa_eth_noauto;

	rc = ipa_eth_net_open_device(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev, "Failed to open network device");
		return rc;
	}

	if (!eth_dev->net_dev) {
		ipa_eth_dev_err(eth_dev, "Netdev info is missing");
		ipa_eth_net_close_device(eth_dev);
		return -EFAULT;
	}

	mutex_lock(&ipa_eth_devices_lock);

	list_add(&eth_dev->device_list, &ipa_eth_devices);
@@ -661,10 +674,11 @@ void ipa_eth_unregister_device(struct ipa_eth_device *eth_dev)

	__ipa_eth_unpair_device(eth_dev);
	list_del(&eth_dev->device_list);

	ipa_eth_dev_log(eth_dev, "Unregistered device");
	ipa_eth_net_close_device(eth_dev);

	mutex_unlock(&ipa_eth_devices_lock);

	ipa_eth_dev_log(eth_dev, "Unregistered device");
}

static phys_addr_t ipa_eth_vmalloc_to_pa(void *vaddr)
@@ -979,7 +993,7 @@ void *ipa_eth_get_ipc_logbuf_dbg(void)
}
EXPORT_SYMBOL(ipa_eth_get_ipc_logbuf_dbg);

#define IPA_ETH_IPC_LOG_PAGES 50
#define IPA_ETH_IPC_LOG_PAGES 128

static int ipa_eth_ipc_log_init(void)
{
+2 −0
Original line number Diff line number Diff line
@@ -10,6 +10,8 @@
 * GNU General Public License for more details.
 */

#include <linux/pci.h>

#include "ipa_eth_i.h"

static bool ipa_eth_bus_is_ready;
+130 −74
Original line number Diff line number Diff line
@@ -131,7 +131,7 @@ int ipa_eth_ep_init_headers(struct ipa_eth_device *eth_dev)
{
	int rc = 0;
	bool vlan_mode;
	const size_t num_hdrs = 2; // one each for IPv4 and IPv6
	const size_t num_hdrs = 2; /* one each for IPv4 and IPv6 */
	size_t hdr_alloc_sz = sizeof(struct ipa_ioc_add_hdr) +
				num_hdrs * sizeof(struct ipa_hdr_add);
	struct ipa_hdr_add *hdr_v4 = NULL;
@@ -156,7 +156,7 @@ int ipa_eth_ep_init_headers(struct ipa_eth_device *eth_dev)
	hdrs->commit = 1;
	hdrs->num_hdrs = num_hdrs;

	// Initialize IPv4 headers
	/* Initialize IPv4 headers */
	snprintf(hdr_v4->name, sizeof(hdr_v4->name), "%s_ipv4",
		eth_dev->net_dev->name);

@@ -165,7 +165,7 @@ int ipa_eth_ep_init_headers(struct ipa_eth_device *eth_dev)
	else
		ipa_eth_init_vlan_header_v4(eth_dev, hdr_v4);

	// Initialize IPv6 headers
	/* Initialize IPv6 headers */
	snprintf(hdr_v6->name, sizeof(hdr_v6->name), "%s_ipv6",
		eth_dev->net_dev->name);

@@ -184,10 +184,11 @@ int ipa_eth_ep_init_headers(struct ipa_eth_device *eth_dev)
}

static void ipa_eth_ep_init_tx_props_v4(struct ipa_eth_device *eth_dev,
		struct ipa_eth_channel *ch,
		struct ipa_ioc_tx_intf_prop *props)
{
	props->ip = IPA_IP_v4;
	props->dst_pipe = eth_dev->ch_tx->ipa_client;
	props->dst_pipe = ch->ipa_client;

	props->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
	snprintf(props->hdr_name, sizeof(props->hdr_name), "%s_ipv4",
@@ -196,10 +197,11 @@ static void ipa_eth_ep_init_tx_props_v4(struct ipa_eth_device *eth_dev,
}

static void ipa_eth_ep_init_tx_props_v6(struct ipa_eth_device *eth_dev,
		struct ipa_eth_channel *ch,
		struct ipa_ioc_tx_intf_prop *props)
{
	props->ip = IPA_IP_v6;
	props->dst_pipe = eth_dev->ch_tx->ipa_client;
	props->dst_pipe = ch->ipa_client;

	props->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
	snprintf(props->hdr_name, sizeof(props->hdr_name), "%s_ipv6",
@@ -207,25 +209,79 @@ static void ipa_eth_ep_init_tx_props_v6(struct ipa_eth_device *eth_dev,
}

/*
 * Fill IPv4 Rx interface properties for @ch, using the channel's own IPA
 * client as the source pipe.
 *
 * Fix: removed the stale duplicate assignment
 * `props->src_pipe = eth_dev->ch_rx->ipa_client;` — a dead store through
 * the old single-channel field, immediately overwritten by the
 * per-channel value.
 */
static void ipa_eth_ep_init_rx_props_v4(struct ipa_eth_device *eth_dev,
		struct ipa_eth_channel *ch,
		struct ipa_ioc_rx_intf_prop *props)
{
	props->ip = IPA_IP_v4;
	props->src_pipe = ch->ipa_client;

	props->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;

	/* TODO: what about attrib? */
}

/*
 * Fill IPv6 Rx interface properties for @ch, using the channel's own IPA
 * client as the source pipe.
 *
 * Fix: removed the stale duplicate assignment
 * `props->src_pipe = eth_dev->ch_rx->ipa_client;` — a dead store through
 * the old single-channel field, immediately overwritten by the
 * per-channel value.
 */
static void ipa_eth_ep_init_rx_props_v6(struct ipa_eth_device *eth_dev,
		struct ipa_eth_channel *ch,
		struct ipa_ioc_rx_intf_prop *props)
{
	props->ip = IPA_IP_v6;
	props->src_pipe = ch->ipa_client;

	props->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
}

static int ipa_eth_ep_init_tx_intf(struct ipa_eth_device *eth_dev,
		struct ipa_tx_intf *tx_intf)
{
	u32 num_props;
	struct list_head *l;
	struct ipa_eth_channel *ch;

	num_props = 0;
	list_for_each(l, &eth_dev->tx_channels)
		num_props += 2; /* one each for IPv4 and IPv6 */

	tx_intf->prop = kcalloc(num_props, sizeof(*tx_intf->prop), GFP_KERNEL);
	if (!tx_intf->prop) {
		ipa_eth_dev_err(eth_dev, "Failed to alloc tx props");
		return -ENOMEM;
	}

	tx_intf->num_props = 0;
	list_for_each_entry(ch, &eth_dev->tx_channels, channel_list) {
		ipa_eth_ep_init_tx_props_v4(eth_dev, ch,
			&tx_intf->prop[tx_intf->num_props++]);
		ipa_eth_ep_init_tx_props_v6(eth_dev, ch,
			&tx_intf->prop[tx_intf->num_props++]);
	}

	return 0;
}

static int ipa_eth_ep_init_rx_intf(struct ipa_eth_device *eth_dev,
		struct ipa_rx_intf *rx_intf)
{
	u32 num_props;
	struct list_head *l;
	struct ipa_eth_channel *ch;

	num_props = 0;
	list_for_each(l, &eth_dev->rx_channels)
		num_props += 2; /* one each for IPv4 and IPv6 */

	rx_intf->prop = kcalloc(num_props, sizeof(*rx_intf->prop), GFP_KERNEL);
	if (!rx_intf->prop) {
		ipa_eth_dev_err(eth_dev, "Failed to alloc rx props");
		return -ENOMEM;
	}

	// TODO: what about attrib?
	rx_intf->num_props = 0;
	list_for_each_entry(ch, &eth_dev->rx_channels, channel_list) {
		ipa_eth_ep_init_rx_props_v4(eth_dev, ch,
			&rx_intf->prop[rx_intf->num_props++]);
		ipa_eth_ep_init_rx_props_v6(eth_dev, ch,
			&rx_intf->prop[rx_intf->num_props++]);
	}

	return 0;
}

/**
@@ -240,27 +296,28 @@ static void ipa_eth_ep_init_rx_props_v6(struct ipa_eth_device *eth_dev,
 */
/*
 * ipa_eth_ep_register_interface() - Register the device's Tx/Rx interface
 * properties with the IPA driver.
 *
 * Builds per-channel Tx and Rx property arrays via
 * ipa_eth_ep_init_tx_intf()/ipa_eth_ep_init_rx_intf(), registers them
 * with ipa_register_intf(), and frees the arrays on every path.
 *
 * Fix: removed stale leftover statements from the pre-rewrite version
 * that were interleaved with the new body — the fixed-size VLA property
 * arrays, their memset/init calls, and a duplicate trailing
 * `return ipa_register_intf(...);` after `return rc;` (unreachable code
 * that would also have re-registered freed property arrays).
 *
 * Return: 0 on success, negative errno otherwise.
 */
int ipa_eth_ep_register_interface(struct ipa_eth_device *eth_dev)
{
	int rc;
	struct ipa_tx_intf tx_intf;
	struct ipa_rx_intf rx_intf;

	memset(&tx_intf, 0, sizeof(tx_intf));
	memset(&rx_intf, 0, sizeof(rx_intf));

	rc = ipa_eth_ep_init_tx_intf(eth_dev, &tx_intf);
	if (rc)
		goto free_and_exit;

	rc = ipa_eth_ep_init_rx_intf(eth_dev, &rx_intf);
	if (rc)
		goto free_and_exit;

	rc = ipa_register_intf(eth_dev->net_dev->name, &tx_intf, &rx_intf);

free_and_exit:
	/* kzfree(NULL) is a no-op, so partial-init paths are safe. */
	kzfree(tx_intf.prop);
	kzfree(rx_intf.prop);

	return rc;
}

/**
@@ -274,44 +331,16 @@ int ipa_eth_ep_unregister_interface(struct ipa_eth_device *eth_dev)
}

/**
 * ipa_eth_ep_init - Initialize IPA endpoint for a channel
 * @ch: Channel for which EP need to be initialized
 *
 * Return: 0 on success, negative errno otherwise
 * ipa_eth_ep_init_ctx - Initialize IPA endpoint context for a channel
 * @ch: Channel for which EP ctx need to be initialized
 * @vlan_mode: true if VLAN mode is enabled for the EP
 */
int ipa_eth_ep_init(struct ipa_eth_channel *ch)
void ipa_eth_ep_init_ctx(struct ipa_eth_channel *ch, bool vlan_mode)
{
	int rc = 0;
	bool vlan_mode;
	const bool client_prod = IPA_CLIENT_IS_PROD(ch->ipa_client);
	const int ep_num = ipa_get_ep_mapping(ch->ipa_client);

	struct ipa3_ep_context *ep_ctx = NULL;

	if (ep_num == IPA_EP_NOT_ALLOCATED) {
		ipa_eth_dev_err(ch->eth_dev,
				"Could not determine EP number for client %d",
				ch->ipa_client);
		rc = -EFAULT;
		goto err_exit;
	}

	ch->ipa_ep_num = ep_num;

	rc = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH, &vlan_mode);
	if (rc) {
		ipa_eth_dev_err(ch->eth_dev,
				"Could not determine IPA VLAN mode");
		goto err_exit;
	}
	struct ipa3_ep_context *ep_ctx = &ipa3_ctx->ep[ch->ipa_ep_num];

	ep_ctx = &ipa3_ctx->ep[ep_num];
	if (ep_ctx->valid) {
		ipa_eth_dev_err(ch->eth_dev,
				"EP context is already initialiazed");
		rc = -EEXIST;
		goto err_exit;
	}
	if (ep_ctx->valid)
		return;

	memset(ep_ctx, 0, offsetof(typeof(*ep_ctx), sys));

@@ -320,30 +349,57 @@ int ipa_eth_ep_init(struct ipa_eth_channel *ch)
	ep_ctx->client_notify = ipa_ep_client_notifier;
	ep_ctx->priv = ch;

	ep_ctx->cfg.nat.nat_en =  client_prod ? IPA_SRC_NAT : IPA_BYPASS_NAT;
	ep_ctx->cfg.nat.nat_en = IPA_CLIENT_IS_PROD(ch->ipa_client) ?
					IPA_SRC_NAT : IPA_BYPASS_NAT;
	ep_ctx->cfg.hdr.hdr_len = vlan_mode ? VLAN_ETH_HLEN : ETH_HLEN;

	ep_ctx->cfg.mode.mode = IPA_BASIC;
}

/**
 * ipa_eth_ep_deinit_ctx - Deinitialize IPA endpoint context for a channel
 * @ch: Channel for which EP ctx need to be deinitialized
 */
void ipa_eth_ep_deinit_ctx(struct ipa_eth_channel *ch)
{
	struct ipa3_ep_context *ep_ctx = &ipa3_ctx->ep[ch->ipa_ep_num];

#ifdef IPA_ETH_DMA_MODE
	if (IPA_ETH_CH_IS_RX(ch)) {
		ep_ctx->cfg.mode.mode = IPA_DMA;
		ep_ctx->cfg.mode.dst = IPA_CLIENT_AQC_ETHERNET_CONS;
	if (!ep_ctx->valid)
		return;

	ep_ctx->valid = false;

	memset(ep_ctx, 0, offsetof(typeof(*ep_ctx), sys));
}

/**
 * ipa_eth_ep_init - Initialize IPA endpoint for a channel
 * @ch: Channel for which EP need to be initialized
 *
 * Return: 0 on success, negative errno otherwise
 */
int ipa_eth_ep_init(struct ipa_eth_channel *ch)
{
	int rc = 0;
	struct ipa3_ep_context *ep_ctx = NULL;

	ep_ctx = &ipa3_ctx->ep[ch->ipa_ep_num];
	if (!ep_ctx->valid) {
		ipa_eth_dev_bug(ch->eth_dev, "EP context is not initialiazed");
		return -EFAULT;
	}
#endif

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	rc = ipa3_cfg_ep(ep_num, &ep_ctx->cfg);
	rc = ipa3_cfg_ep(ch->ipa_ep_num, &ep_ctx->cfg);
	if (rc) {
		ipa_eth_dev_err(ch->eth_dev,
				"Failed to configure EP %d", ep_num);
				"Failed to configure EP %d", ch->ipa_ep_num);
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		goto err_exit;
	}

	if (IPA_ETH_CH_IS_RX(ch))
		ipa3_install_dflt_flt_rules(ep_num);
	if (IPA_CLIENT_IS_PROD(ch->ipa_client))
		ipa3_install_dflt_flt_rules(ch->ipa_ep_num);

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

Loading