Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fde383ce authored by qctecmdr, committed by Gerrit Code Review
Browse files

Merge "msm: ipa: IPA offload subsystem for ethernet devices"

parents 2cf3ec57 15c887d0
Loading
Loading
Loading
Loading
+0 −5
Original line number Diff line number Diff line
@@ -10,12 +10,7 @@ ipa-eth-y := \
	ipa_eth_bus.o \
	ipa_eth.o \
	ipa_eth_debugfs.o \
	ipa_eth_ep.o \
	ipa_eth_gsi.o \
	ipa_eth_net.o \
	ipa_eth_offload.o \
	ipa_eth_pci.o \
	ipa_eth_pm.o \
	ipa_eth_trace.o \
	ipa_eth_uc.o \
	ipa_eth_utils.o
+7 −302
Original line number Diff line number Diff line
@@ -83,20 +83,6 @@ static int ipa_eth_init_device(struct ipa_eth_device *eth_dev)
	if (eth_dev->of_state != IPA_ETH_OF_ST_DEINITED)
		return -EFAULT;

	rc = ipa_eth_ep_init_headers(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev, "Failed to init EP headers");
		eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		return rc;
	}

	rc = ipa_eth_pm_register(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev, "Failed to register with IPA PM");
		eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		return rc;
	}

	rc = ipa_eth_offload_init(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev, "Failed to init offload");
@@ -104,11 +90,6 @@ static int ipa_eth_init_device(struct ipa_eth_device *eth_dev)
		return rc;
	}

	rc = ipa_eth_uc_stats_init(eth_dev);
	if (rc)
		ipa_eth_dev_err(eth_dev,
			"Failed to init uC stats monitor, continuing.");

	ipa_eth_dev_log(eth_dev, "Initialized device");

	eth_dev->of_state = IPA_ETH_OF_ST_INITED;
@@ -126,11 +107,6 @@ static int ipa_eth_deinit_device(struct ipa_eth_device *eth_dev)
	if (eth_dev->of_state != IPA_ETH_OF_ST_INITED)
		return -EFAULT;

	rc = ipa_eth_uc_stats_deinit(eth_dev);
	if (rc)
		ipa_eth_dev_err(eth_dev,
			"Failed to deinit uC stats monitor, continuing.");

	rc = ipa_eth_offload_deinit(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev, "Failed to deinit offload");
@@ -138,20 +114,6 @@ static int ipa_eth_deinit_device(struct ipa_eth_device *eth_dev)
		return rc;
	}

	rc = ipa_eth_pm_unregister(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev, "Failed to unregister with IPA PM");
		eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		return rc;
	}

	rc = ipa_eth_ep_deinit_headers(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev, "Failed to deinit EP headers");
		eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		return rc;
	}

	ipa_eth_dev_log(eth_dev, "Deinitialized device");

	eth_dev->of_state = IPA_ETH_OF_ST_DEINITED;
@@ -169,13 +131,6 @@ static int ipa_eth_start_device(struct ipa_eth_device *eth_dev)
	if (eth_dev->of_state != IPA_ETH_OF_ST_INITED)
		return -EFAULT;

	rc = ipa_eth_pm_activate(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev, "Failed to activate device PM");
		eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		return rc;
	}

	rc = ipa_eth_offload_start(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev, "Failed to start offload");
@@ -183,33 +138,6 @@ static int ipa_eth_start_device(struct ipa_eth_device *eth_dev)
		return rc;
	}

	rc = ipa_eth_bus_disable_pc(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev,
			"Failed to disable bus power collapse");
		eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		return rc;
	}

	/* We cannot register the interface during offload init phase because
	 * it will cause IPACM to install IPA filter rules when it receives a
	 * link-up netdev event, even if offload path is not started and/or no
	 * ECM_CONNECT event is received from the driver. Since installing IPA
	 * filter rules while offload path is stopped can cause DL data stall,
	 * register the interface only after the offload path is started.
	 */
	rc = ipa_eth_ep_register_interface(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev, "Failed to register EP interface");
		eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		return rc;
	}

	rc = ipa_eth_uc_stats_start(eth_dev);
	if (rc)
		ipa_eth_dev_err(eth_dev,
			"Failed to start uC stats monitor, continuing.");

	ipa_eth_dev_log(eth_dev, "Started device");

	eth_dev->of_state = IPA_ETH_OF_ST_STARTED;
@@ -227,26 +155,6 @@ static int ipa_eth_stop_device(struct ipa_eth_device *eth_dev)
	if (eth_dev->of_state != IPA_ETH_OF_ST_STARTED)
		return -EFAULT;

	rc = ipa_eth_uc_stats_stop(eth_dev);
	if (rc)
		ipa_eth_dev_err(eth_dev,
			"Failed to stop uC stats monitor, continuing.");

	rc = ipa_eth_ep_unregister_interface(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev, "Failed to unregister IPA interface");
		eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		return rc;
	}

	rc = ipa_eth_bus_enable_pc(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev,
			"Failed to enable bus power collapse");
		eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		return rc;
	}

	rc = ipa_eth_offload_stop(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev, "Failed to stop offload");
@@ -254,13 +162,6 @@ static int ipa_eth_stop_device(struct ipa_eth_device *eth_dev)
		return rc;
	}

	rc = ipa_eth_pm_deactivate(eth_dev);
	if (rc) {
		ipa_eth_dev_err(eth_dev, "Failed to deactivate device PM");
		eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		return rc;
	}

	ipa_eth_dev_log(eth_dev, "Stopped device");

	eth_dev->of_state = IPA_ETH_OF_ST_INITED;
@@ -285,9 +186,7 @@ static void ipa_eth_device_refresh(struct ipa_eth_device *eth_dev)

	if (initable(eth_dev)) {
		if (eth_dev->of_state == IPA_ETH_OF_ST_DEINITED) {
			IPA_ACTIVE_CLIENTS_INC_SIMPLE();
			(void) ipa_eth_init_device(eth_dev);
			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

			if (eth_dev->of_state != IPA_ETH_OF_ST_INITED) {
				ipa_eth_dev_err(eth_dev,
@@ -298,37 +197,17 @@ static void ipa_eth_device_refresh(struct ipa_eth_device *eth_dev)
	}

	if (startable(eth_dev)) {
		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
		(void) ipa_eth_start_device(eth_dev);
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

		if (eth_dev->of_state != IPA_ETH_OF_ST_STARTED) {
			ipa_eth_dev_err(eth_dev, "Failed to start device");
			return;
		}

		if (ipa_eth_net_register_upper(eth_dev)) {
			ipa_eth_dev_err(eth_dev,
				"Failed to register upper interfaces");
			eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		}

		if (ipa_eth_pm_vote_bw(eth_dev))
			ipa_eth_dev_err(eth_dev,
					"Failed to vote for required BW");
	} else {
		ipa_eth_dev_log(eth_dev, "Start is disallowed for the device");

		if (ipa_eth_net_unregister_upper(eth_dev)) {
			ipa_eth_dev_err(eth_dev,
				"Failed to unregister upper interfaces");
			eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		}

		if (eth_dev->of_state == IPA_ETH_OF_ST_STARTED) {
			IPA_ACTIVE_CLIENTS_INC_SIMPLE();
			ipa_eth_stop_device(eth_dev);
			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

			if (eth_dev->of_state != IPA_ETH_OF_ST_INITED) {
				ipa_eth_dev_err(eth_dev,
@@ -341,165 +220,7 @@ static void ipa_eth_device_refresh(struct ipa_eth_device *eth_dev)
	if (!initable(eth_dev)) {
		ipa_eth_dev_log(eth_dev, "Init is disallowed for the device");

		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
		ipa_eth_deinit_device(eth_dev);
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

		if (eth_dev->of_state != IPA_ETH_OF_ST_DEINITED) {
			ipa_eth_dev_err(eth_dev, "Failed to deinit device");
			return;
		}
	}
}

/* Move a "skip IPA" device from DEINITED to INITED by initializing only the
 * offload path (no IPA EP/PM calls are made in this mode). Returns 0 if the
 * device is already INITED, -EFAULT for any other unexpected state.
 */
static int ipa_eth_init_device_skip_ipa(struct ipa_eth_device *eth_dev)
{
	int ret;

	switch (eth_dev->of_state) {
	case IPA_ETH_OF_ST_INITED:
		return 0;
	case IPA_ETH_OF_ST_DEINITED:
		break;
	default:
		return -EFAULT;
	}

	ret = ipa_eth_offload_init(eth_dev);
	if (ret) {
		ipa_eth_dev_err(eth_dev, "Failed to init offload");
		eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		return ret;
	}

	ipa_eth_dev_log(eth_dev, "Initialized device");
	eth_dev->of_state = IPA_ETH_OF_ST_INITED;

	return 0;
}

/* Move a "skip IPA" device from INITED back to DEINITED by deinitializing
 * only the offload path. Returns 0 if already DEINITED, -EFAULT for any
 * other unexpected state.
 */
static int ipa_eth_deinit_device_skip_ipa(struct ipa_eth_device *eth_dev)
{
	int ret;

	switch (eth_dev->of_state) {
	case IPA_ETH_OF_ST_DEINITED:
		return 0;
	case IPA_ETH_OF_ST_INITED:
		break;
	default:
		return -EFAULT;
	}

	ret = ipa_eth_offload_deinit(eth_dev);
	if (ret) {
		ipa_eth_dev_err(eth_dev, "Failed to deinit offload");
		eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		return ret;
	}

	ipa_eth_dev_log(eth_dev, "Deinitialized device");
	eth_dev->of_state = IPA_ETH_OF_ST_DEINITED;

	return 0;
}

/* Move a "skip IPA" device from INITED to STARTED by starting only the
 * offload path. Returns 0 if already STARTED, -EFAULT for any other
 * unexpected state.
 */
static int ipa_eth_start_device_skip_ipa(struct ipa_eth_device *eth_dev)
{
	int ret;

	switch (eth_dev->of_state) {
	case IPA_ETH_OF_ST_STARTED:
		return 0;
	case IPA_ETH_OF_ST_INITED:
		break;
	default:
		return -EFAULT;
	}

	ret = ipa_eth_offload_start(eth_dev);
	if (ret) {
		ipa_eth_dev_err(eth_dev, "Failed to start offload");
		eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		return ret;
	}

	ipa_eth_dev_log(eth_dev, "Started device");
	eth_dev->of_state = IPA_ETH_OF_ST_STARTED;

	return 0;
}

/* Move a "skip IPA" device from STARTED back to INITED by stopping only the
 * offload path. A DEINITED device is treated as already stopped (returns 0);
 * any state other than STARTED is rejected with -EFAULT.
 */
static int ipa_eth_stop_device_skip_ipa(struct ipa_eth_device *eth_dev)
{
	int ret;

	switch (eth_dev->of_state) {
	case IPA_ETH_OF_ST_DEINITED:
		return 0;
	case IPA_ETH_OF_ST_STARTED:
		break;
	default:
		return -EFAULT;
	}

	ret = ipa_eth_offload_stop(eth_dev);
	if (ret) {
		ipa_eth_dev_err(eth_dev, "Failed to stop offload");
		eth_dev->of_state = IPA_ETH_OF_ST_ERROR;
		return ret;
	}

	ipa_eth_dev_log(eth_dev, "Stopped device");
	eth_dev->of_state = IPA_ETH_OF_ST_INITED;

	return 0;
}

static void ipa_eth_device_refresh_skip_ipa(struct ipa_eth_device *eth_dev)
{
	ipa_eth_dev_log(eth_dev, "Refreshing offload state for device");

	if (!ipa_eth_offload_device_paired(eth_dev)) {
		ipa_eth_dev_log(eth_dev, "Device is not paired. Skipping.");
		return;
	}

	if (eth_dev->of_state == IPA_ETH_OF_ST_ERROR) {
		ipa_eth_dev_err(eth_dev,
				"Device in ERROR state, skipping refresh");
		return;
	}

	if (initable(eth_dev)) {
		if (eth_dev->of_state == IPA_ETH_OF_ST_DEINITED) {
			(void) ipa_eth_init_device_skip_ipa(eth_dev);

			if (eth_dev->of_state != IPA_ETH_OF_ST_INITED) {
				ipa_eth_dev_err(eth_dev,
						"Failed to init device");
				return;
			}
		}
	}

	if (startable(eth_dev)) {
		(void) ipa_eth_start_device_skip_ipa(eth_dev);

		if (eth_dev->of_state != IPA_ETH_OF_ST_STARTED) {
			ipa_eth_dev_err(eth_dev, "Failed to start device");
			return;
		}
	} else {
		ipa_eth_dev_log(eth_dev, "Start is disallowed for the device");

		if (eth_dev->of_state == IPA_ETH_OF_ST_STARTED) {
			ipa_eth_stop_device_skip_ipa(eth_dev);

			if (eth_dev->of_state != IPA_ETH_OF_ST_INITED) {
				ipa_eth_dev_err(eth_dev,
						"Failed to stop device");
				return;
			}
		}
	}

	if (!initable(eth_dev)) {
		ipa_eth_dev_log(eth_dev, "Init is disallowed for the device");

		ipa_eth_deinit_device_skip_ipa(eth_dev);

		if (eth_dev->of_state != IPA_ETH_OF_ST_DEINITED) {
			ipa_eth_dev_err(eth_dev, "Failed to deinit device");
@@ -513,9 +234,6 @@ static void ipa_eth_device_refresh_work(struct work_struct *work)
	struct ipa_eth_device *eth_dev = container_of(work,
				struct ipa_eth_device, refresh);

	if (unlikely(eth_dev->skip_ipa))
		ipa_eth_device_refresh_skip_ipa(eth_dev);
	else
	ipa_eth_device_refresh(eth_dev);
}

@@ -570,9 +288,7 @@ static int ipa_eth_device_prepare_reset(
	 */
	set_bit(IPA_ETH_DEV_F_RESETTING, &eth_dev->flags);

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	rc = ipa_eth_offload_prepare_reset(eth_dev, data);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	return rc;
}
@@ -582,9 +298,7 @@ static int ipa_eth_device_complete_reset(
{
	int rc = 0;

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	rc = ipa_eth_offload_complete_reset(eth_dev, data);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	/* Clear the flag before unlocking the mutex so that blocked threads
	 * can resume with the updated value.
@@ -657,9 +371,9 @@ int ipa_eth_device_notify(struct ipa_eth_device *eth_dev,
}
EXPORT_SYMBOL(ipa_eth_device_notify);

static void ipa_eth_dev_start_timer_cb(unsigned long data)
static void ipa_eth_dev_start_timer_cb(struct timer_list *t)
{
	struct ipa_eth_device *eth_dev = (struct ipa_eth_device *)data;
	struct ipa_eth_device *eth_dev = from_timer(eth_dev, t, start_timer);

	ipa_eth_dev_log(eth_dev, "Start timer has fired");

@@ -771,25 +485,13 @@ struct ipa_eth_device *ipa_eth_alloc_device(
	eth_dev->dev = dev;
	eth_dev->nd = nd;

	/* Network/offload driver supports direct call to IPA driver. Skip IPA
	 * driver calls for the device.
	 */
	if (nd->features & IPA_ETH_DEV_F_IPA_API) {
		ipa_eth_dev_log(eth_dev, "Device requests for skipping IPA");
		eth_dev->skip_ipa = true;
	}

	eth_dev->of_state = IPA_ETH_OF_ST_DEINITED;
	eth_dev->pm_handle = IPA_PM_MAX_CLIENTS;
	INIT_WORK(&eth_dev->refresh, ipa_eth_device_refresh_work);

	INIT_LIST_HEAD(&eth_dev->rx_channels);
	INIT_LIST_HEAD(&eth_dev->tx_channels);

	init_timer(&eth_dev->start_timer);

	eth_dev->start_timer.function = ipa_eth_dev_start_timer_cb;
	eth_dev->start_timer.data = (unsigned long)eth_dev;
	timer_setup(&eth_dev->start_timer, ipa_eth_dev_start_timer_cb, 0);

	eth_dev->init = eth_dev->start = !ipa_eth_noauto;

@@ -974,6 +676,7 @@ void ipa_eth_unregister_net_driver(struct ipa_eth_net_driver *nd)
}
EXPORT_SYMBOL(ipa_eth_unregister_net_driver);


/**
 * ipa_eth_register_offload_driver - Register an offload driver with the offload
 *                                   subsystem
@@ -1105,6 +808,7 @@ int ipa_eth_init(void)
err_ipclog:
	return rc;
}
module_init(ipa_eth_init);

void ipa_eth_exit(void)
{
@@ -1133,3 +837,4 @@ void ipa_eth_exit(void)

	ipa_eth_ipc_log_cleanup();
}
module_exit(ipa_eth_exit);
+1 −6
Original line number Diff line number Diff line
@@ -237,13 +237,8 @@ static const struct file_operations fops_eth_dev_ready = {

int ipa_eth_debugfs_init(void)
{
	struct dentry *ipa_debugfs = ipa_debugfs_get_root();

	if (IS_ERR_OR_NULL(ipa_debugfs))
		return -EFAULT;

	ipa_eth_debugfs =
		debugfs_create_dir("ethernet", ipa_debugfs);
		debugfs_create_dir("ethernet", NULL);
	if (IS_ERR_OR_NULL(ipa_eth_debugfs)) {
		ipa_eth_log("Unable to create debugfs root");
		goto err_exit;
+0 −680

File deleted.

Preview size limit exceeded, changes collapsed.

+0 −371
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved
 */

#include <linux/msm_gsi.h>

#include "ipa_eth_i.h"

/* Default GSI event ring error callback: logs the error id and error
 * descriptor against the device that owns the offload channel stored in
 * @notify->user_data.
 */
static void ipa_eth_gsi_ev_err(struct gsi_evt_err_notify *notify)
{
	struct ipa_eth_channel *ch = notify->user_data;
	struct ipa3_ep_context *ep_ctx = &ipa3_ctx->ep[ch->ipa_ep_num];

	/* %lu matches the handle's type as printed throughout this file;
	 * %u here was a format specifier mismatch.
	 */
	ipa_eth_dev_err(ch->eth_dev,
			"Error (id=%d, edesc=%04x) in GSI event ring %lu",
			notify->evt_id, notify->err_desc,
			ep_ctx->gsi_evt_ring_hdl);
}

/* Default GSI channel error callback: logs the error id and error
 * descriptor against the device that owns the offload channel stored in
 * @notify->chan_user_data.
 */
static void ipa_eth_gsi_ch_err(struct gsi_chan_err_notify *notify)
{
	struct ipa_eth_channel *ch = notify->chan_user_data;
	struct ipa3_ep_context *ep_ctx = &ipa3_ctx->ep[ch->ipa_ep_num];

	/* %lu matches the handle's type as printed throughout this file;
	 * %u here was a format specifier mismatch.
	 */
	ipa_eth_dev_err(ch->eth_dev,
			"Error (id=%d, edesc=%04x) in GSI channel %lu",
			notify->evt_id, notify->err_desc,
			ep_ctx->gsi_chan_hdl);
}

/**
 * ipa_eth_gsi_alloc() - Allocate GSI channel and event ring for an offload
 *                       channel, optionally writing to the ring scratch
 *                       register and fetch the ring doorbell address
 * @ch: Offload channel
 * @gsi_ev_props: Properties of the GSI event ring to be allocated
 * @gsi_ev_scratch: Optional. Points to the value to be written to GSI
 *                  event ring scratch register
 * @gsi_ev_db: Optional. Writes event ring doorbell LSB address to the location
 *                       pointed to by the argument
 * @gsi_ch_props: Properties of the GSI channel to be allocated
 * @gsi_ch_scratch: Optional. Points to the value to be written to GSI
 *                  channel scratch register
 * @gsi_ch_db: Optional. Writes channel doorbell LSB address to the location
 *                       pointed to by the argument
 *
 * On failure, any GSI object already allocated by this call is deallocated
 * and the corresponding EP context handle is reset to ~0.
 *
 * Return: 0 on success, negative errno otherwise
 */
int ipa_eth_gsi_alloc(struct ipa_eth_channel *ch,
		      struct gsi_evt_ring_props *gsi_ev_props,
		      union gsi_evt_scratch *gsi_ev_scratch,
		      phys_addr_t *gsi_ev_db,
		      struct gsi_chan_props *gsi_ch_props,
		      union gsi_channel_scratch *gsi_ch_scratch,
		      phys_addr_t *gsi_ch_db)
{
	enum gsi_status gsi_rc = GSI_STATUS_SUCCESS;
	const struct ipa_gsi_ep_config *gsi_ep_cfg;
	struct ipa3_ep_context *ep_ctx = &ipa3_ctx->ep[ch->ipa_ep_num];

	if (!ep_ctx->valid) {
		ipa_eth_dev_err(ch->eth_dev, "EP context is not initialized");
		return -EFAULT;
	}

	gsi_ep_cfg = ipa3_get_gsi_ep_info(ep_ctx->client);
	if (!gsi_ep_cfg) {
		ipa_eth_dev_err(ch->eth_dev, "Failed to obtain GSI EP info");
		return -EFAULT;
	}

	/* Install the default event ring error callback only when the caller
	 * did not supply one of its own.
	 */
	if (!gsi_ev_props->err_cb) {
		gsi_ev_props->err_cb = ipa_eth_gsi_ev_err;
		gsi_ev_props->user_data = ch;
	}

	gsi_rc = gsi_alloc_evt_ring(gsi_ev_props, ipa3_ctx->gsi_dev_hdl,
		&ep_ctx->gsi_evt_ring_hdl);
	if (gsi_rc != GSI_STATUS_SUCCESS) {
		ipa_eth_dev_err(ch->eth_dev, "Failed to alloc GSI event ring");
		return -EFAULT;
	}

	ipa_eth_dev_dbg(ch->eth_dev, "GSI event ring handle is %lu",
			ep_ctx->gsi_evt_ring_hdl);

	/* Caller asked for the event ring doorbell address; only the LSB is
	 * reported back.
	 */
	if (gsi_ev_db) {
		u32 db_addr_lsb = 0;
		u32 db_addr_msb = 0;

		gsi_rc = gsi_query_evt_ring_db_addr(ep_ctx->gsi_evt_ring_hdl,
			&db_addr_lsb, &db_addr_msb);
		if (gsi_rc != GSI_STATUS_SUCCESS) {
			ipa_eth_dev_err(ch->eth_dev,
				"Failed to get DB address for event ring %lu",
				ep_ctx->gsi_evt_ring_hdl);
			goto err_free_ev;
		}

		ipa_eth_dev_dbg(ch->eth_dev,
				"GSI event ring %lu DB address LSB is 0x%08x",
				ep_ctx->gsi_evt_ring_hdl, db_addr_lsb);
		ipa_eth_dev_dbg(ch->eth_dev,
				"GSI event ring %lu DB address MSB is 0x%08x",
				ep_ctx->gsi_evt_ring_hdl, db_addr_msb);

		*gsi_ev_db = db_addr_lsb;
	}

	if (gsi_ev_scratch) {
		gsi_rc = gsi_write_evt_ring_scratch(ep_ctx->gsi_evt_ring_hdl,
				*gsi_ev_scratch);
		if (gsi_rc != GSI_STATUS_SUCCESS) {
			ipa_eth_dev_err(ch->eth_dev,
				"Failed to write scratch for event ring %lu",
				ep_ctx->gsi_evt_ring_hdl);
			goto err_free_ev;
		}
	}

	/* Channel id and prefetch settings come from the per-EP GSI config;
	 * the channel is bound to the event ring allocated above.
	 */
	gsi_ch_props->ch_id = gsi_ep_cfg->ipa_gsi_chan_num;
	gsi_ch_props->evt_ring_hdl = ep_ctx->gsi_evt_ring_hdl;

	gsi_ch_props->prefetch_mode = gsi_ep_cfg->prefetch_mode;
	gsi_ch_props->empty_lvl_threshold = gsi_ep_cfg->prefetch_threshold;

	/* Install the default channel error callback only when the caller
	 * did not supply one of its own.
	 */
	if (!gsi_ch_props->err_cb) {
		gsi_ch_props->err_cb = ipa_eth_gsi_ch_err;
		gsi_ch_props->chan_user_data = ch;
	}

	gsi_rc = gsi_alloc_channel(gsi_ch_props, ipa3_ctx->gsi_dev_hdl,
		&ep_ctx->gsi_chan_hdl);
	if (gsi_rc != GSI_STATUS_SUCCESS) {
		ipa_eth_dev_err(ch->eth_dev, "Failed to alloc GSI channel");
		goto err_free_ev;
	}

	ipa_eth_dev_dbg(ch->eth_dev, "GSI channel handle is %lu",
			ep_ctx->gsi_chan_hdl);

	if (gsi_ch_db) {
		u32 db_addr_lsb = 0;
		u32 db_addr_msb = 0;

		gsi_rc = gsi_query_channel_db_addr(ep_ctx->gsi_chan_hdl,
			&db_addr_lsb, &db_addr_msb);
		if (gsi_rc != GSI_STATUS_SUCCESS) {
			ipa_eth_dev_err(ch->eth_dev,
				"Failed to get DB address for channel %lu",
				ep_ctx->gsi_chan_hdl);
			goto err_free_ch;
		}

		ipa_eth_dev_dbg(ch->eth_dev,
				"GSI channel %lu DB address LSB is 0x%08x",
				ep_ctx->gsi_chan_hdl, db_addr_lsb);
		ipa_eth_dev_dbg(ch->eth_dev,
				"GSI channel %lu DB address MSB is 0x%08x",
				ep_ctx->gsi_chan_hdl, db_addr_msb);

		*gsi_ch_db = db_addr_lsb;
	}

	if (gsi_ch_scratch) {
		gsi_rc = gsi_write_channel_scratch(ep_ctx->gsi_chan_hdl,
				*gsi_ch_scratch);
		if (gsi_rc != GSI_STATUS_SUCCESS) {
			ipa_eth_dev_err(ch->eth_dev,
				"Failed to write scratch for channel %lu",
				ep_ctx->gsi_chan_hdl);
			goto err_free_ch;
		}
	}

	return 0;

	/* Unwind in reverse allocation order; err_free_ch intentionally
	 * falls through to err_free_ev so both objects are released.
	 */
err_free_ch:
	if (gsi_dealloc_channel(ep_ctx->gsi_chan_hdl))
		ipa_eth_dev_err(ch->eth_dev,
				"Failed to dealloc GSI channel %lu",
				ep_ctx->gsi_chan_hdl);

	ep_ctx->gsi_chan_hdl = ~0;

err_free_ev:
	if (gsi_dealloc_evt_ring(ep_ctx->gsi_evt_ring_hdl))
		ipa_eth_dev_err(ch->eth_dev,
				"Failed to dealloc GSI event ring %lu",
				ep_ctx->gsi_evt_ring_hdl);

	ep_ctx->gsi_evt_ring_hdl = ~0;

	/* NOTE(review): gsi_rc here is a positive GSI status code, while the
	 * header comment promises a negative errno; callers that test for
	 * rc < 0 would miss these failures. Confirm callers only test for
	 * non-zero before relying on the sign.
	 */
	return gsi_rc;
}
EXPORT_SYMBOL(ipa_eth_gsi_alloc);

/**
 * ipa_eth_gsi_dealloc() - De-allocate GSI event ring and channel associated
 *                         with an offload channel, previously allocated with
 *                         ipa_eth_gsi_alloc()
 * @ch: Offload channel
 *
 * Each object is reset before being de-allocated. A handle value of ~0 marks
 * an unallocated object, so the function is safe to call after a partial
 * allocation failure.
 *
 * Return: 0 on success, -EFAULT if the EP context is invalid.
 *         NOTE(review): on a GSI API failure the positive GSI status code is
 *         returned as-is rather than a negative errno — confirm callers only
 *         test for non-zero.
 */
int ipa_eth_gsi_dealloc(struct ipa_eth_channel *ch)
{
	enum gsi_status gsi_rc = GSI_STATUS_SUCCESS;
	struct ipa3_ep_context *ep_ctx = &ipa3_ctx->ep[ch->ipa_ep_num];

	if (!ep_ctx->valid) {
		ipa_eth_dev_err(ch->eth_dev, "EP context is not initialized");
		return -EFAULT;
	}

	if (ep_ctx->gsi_chan_hdl != ~0) {
		gsi_rc = gsi_reset_channel(ep_ctx->gsi_chan_hdl);
		if (gsi_rc != GSI_STATUS_SUCCESS) {
			/* %lu matches the handle type as printed elsewhere in
			 * this file; %u here was a format specifier mismatch.
			 */
			ipa_eth_dev_err(ch->eth_dev,
				"Failed to reset channel %lu",
				ep_ctx->gsi_chan_hdl);
			return gsi_rc;
		}

		gsi_rc = gsi_dealloc_channel(ep_ctx->gsi_chan_hdl);
		if (gsi_rc != GSI_STATUS_SUCCESS) {
			ipa_eth_dev_err(ch->eth_dev,
					"Failed to dealloc channel %lu",
					ep_ctx->gsi_chan_hdl);
			return gsi_rc;
		}

		/* Mark the channel as freed so a retry skips it */
		ep_ctx->gsi_chan_hdl = ~0;
	}

	if (ep_ctx->gsi_evt_ring_hdl != ~0) {
		gsi_rc = gsi_reset_evt_ring(ep_ctx->gsi_evt_ring_hdl);
		if (gsi_rc != GSI_STATUS_SUCCESS) {
			ipa_eth_dev_err(ch->eth_dev,
				"Failed to reset event ring %lu",
				ep_ctx->gsi_evt_ring_hdl);
			return gsi_rc;
		}

		gsi_rc = gsi_dealloc_evt_ring(ep_ctx->gsi_evt_ring_hdl);
		if (gsi_rc != GSI_STATUS_SUCCESS) {
			ipa_eth_dev_err(ch->eth_dev,
					"Failed to dealloc event ring %lu",
					ep_ctx->gsi_evt_ring_hdl);
			return gsi_rc;
		}

		/* Mark the event ring as freed so a retry skips it */
		ep_ctx->gsi_evt_ring_hdl = ~0;
	}

	return 0;
}
EXPORT_SYMBOL(ipa_eth_gsi_dealloc);

/**
 * ipa_eth_gsi_ring_evtring() - Ring an offload channel event ring doorbell
 * @ch: Offload channel associated with the event ring
 * @value: Value to write to the doorbell
 *
 * Return: 0 on success, negative errno otherwise
 */
int ipa_eth_gsi_ring_evtring(struct ipa_eth_channel *ch, u64 value)
{
	struct ipa3_ep_context *ep = &ipa3_ctx->ep[ch->ipa_ep_num];
	enum gsi_status status;

	if (!ep->valid) {
		ipa_eth_dev_err(ch->eth_dev, "EP context is not initialized");
		return -EFAULT;
	}

	status = gsi_ring_evt_ring_db(ep->gsi_evt_ring_hdl, value);
	if (status == GSI_STATUS_SUCCESS)
		return 0;

	ipa_eth_dev_err(ch->eth_dev,
			"Failed to ring DB for event ring %lu",
			ep->gsi_evt_ring_hdl);
	return status;
}
EXPORT_SYMBOL(ipa_eth_gsi_ring_evtring);

/**
 * ipa_eth_gsi_ring_channel() - Ring an offload channel GSI channel doorbell
 * @ch: Offload channel associated with the GSI channel
 * @value: Value to write to the doorbell
 *
 * Return: 0 on success, negative errno otherwise
 */
int ipa_eth_gsi_ring_channel(struct ipa_eth_channel *ch, u64 value)
{
	struct ipa3_ep_context *ep = &ipa3_ctx->ep[ch->ipa_ep_num];
	enum gsi_status status;

	if (!ep->valid) {
		ipa_eth_dev_err(ch->eth_dev, "EP context is not initialized");
		return -EFAULT;
	}

	status = gsi_ring_ch_ring_db(ep->gsi_chan_hdl, value);
	if (status == GSI_STATUS_SUCCESS)
		return 0;

	ipa_eth_dev_err(ch->eth_dev,
			"Failed to ring DB for channel %lu",
			ep->gsi_chan_hdl);
	return status;
}
EXPORT_SYMBOL(ipa_eth_gsi_ring_channel);

/**
 * ipa_eth_gsi_start() - Start GSI channel associated with offload channel
 * @ch: Offload channel
 *
 * Return: 0 on success, negative errno otherwise
 */
int ipa_eth_gsi_start(struct ipa_eth_channel *ch)
{
	enum gsi_status gsi_rc = GSI_STATUS_SUCCESS;
	struct ipa3_ep_context *ep_ctx = &ipa3_ctx->ep[ch->ipa_ep_num];

	if (!ep_ctx->valid) {
		ipa_eth_dev_err(ch->eth_dev, "EP context is not initialized");
		return -EFAULT;
	}

	gsi_rc = gsi_start_channel(ep_ctx->gsi_chan_hdl);
	if (gsi_rc != GSI_STATUS_SUCCESS) {
		ipa_eth_dev_err(ch->eth_dev, "Failed to start GSI channel %lu",
				ep_ctx->gsi_chan_hdl);
		return gsi_rc;
	}

	return 0;
}
EXPORT_SYMBOL(ipa_eth_gsi_start);

/**
 * ipa_eth_gsi_stop() - Stop GSI channel associated with offload channel
 * @ch: Offload channel
 *
 * Return: 0 on success, negative errno otherwise
 */
int ipa_eth_gsi_stop(struct ipa_eth_channel *ch)
{
	enum gsi_status gsi_rc = GSI_STATUS_SUCCESS;
	struct ipa3_ep_context *ep_ctx = &ipa3_ctx->ep[ch->ipa_ep_num];

	if (!ep_ctx->valid) {
		ipa_eth_dev_err(ch->eth_dev, "EP context is not initialized");
		return -EFAULT;
	}

	gsi_rc = gsi_stop_channel(ep_ctx->gsi_chan_hdl);
	if (gsi_rc != GSI_STATUS_SUCCESS) {
		ipa_eth_dev_err(ch->eth_dev, "Failed to stop GSI channel %lu",
				ep_ctx->gsi_chan_hdl);
		return gsi_rc;
	}

	return 0;
}
EXPORT_SYMBOL(ipa_eth_gsi_stop);
Loading