Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2a8be665 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "coresight-tmc-etr: Add QDSS IPA bam connection support"

parents 95e63a81 2ed59bfb
Loading
Loading
Loading
Loading
+209 −0
Original line number Diff line number Diff line
@@ -22,6 +22,7 @@
#define USB_SG_NUM (USB_BLK_SIZE / PAGE_SIZE)
#define USB_BUF_NUM 255
#define USB_TIME_OUT (5 * HZ)
#define PCIE_BLK_SIZE 32768

static struct tmc_drvdata *tmcdrvdata;

@@ -54,6 +55,9 @@ static irqreturn_t etr_handler(int irq, void *data)
	} else if (tmcdrvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
		atomic_inc(&byte_cntr_data->irq_cnt);
		wake_up(&byte_cntr_data->wq);
	} else if (tmcdrvdata->out_mode == TMC_ETR_OUT_MODE_PCIE) {
		atomic_inc(&byte_cntr_data->irq_cnt);
		wake_up(&byte_cntr_data->pcie_wait_wq);
	}
	return IRQ_HANDLED;

@@ -176,6 +180,49 @@ void tmc_etr_byte_cntr_stop(struct byte_cntr *byte_cntr_data)
}
EXPORT_SYMBOL(tmc_etr_byte_cntr_stop);

/*
 * Close the MHI output channel and mark it unavailable so the write
 * worker stops submitting requests on it.
 */
static void etr_pcie_close_channel(struct byte_cntr *byte_cntr_data)
{
	struct mutex *lock;

	if (!byte_cntr_data)
		return;

	lock = &byte_cntr_data->byte_cntr_lock;

	mutex_lock(lock);
	mhi_dev_close_channel(byte_cntr_data->out_handle);
	byte_cntr_data->pcie_chan_opened = false;
	mutex_unlock(lock);
}

int etr_pcie_start(struct byte_cntr *byte_cntr_data)
{
	if (!byte_cntr_data)
		return -ENOMEM;

	mutex_lock(&byte_cntr_data->byte_cntr_lock);
	coresight_csr_set_byte_cntr(byte_cntr_data->csr, PCIE_BLK_SIZE / 8);
	atomic_set(&byte_cntr_data->irq_cnt, 0);
	mutex_unlock(&byte_cntr_data->byte_cntr_lock);

	if (!byte_cntr_data->pcie_chan_opened)
		queue_work(byte_cntr_data->pcie_wq,
				&byte_cntr_data->pcie_open_work);

	queue_work(byte_cntr_data->pcie_wq, &byte_cntr_data->pcie_write_work);
	return 0;
}
EXPORT_SYMBOL(etr_pcie_start);

/*
 * Stop the PCIe software path: close the MHI channel first (clearing
 * pcie_chan_opened), wake the write worker so it observes the closed
 * channel and exits its wait, then disable the byte counter in the CSR.
 */
void etr_pcie_stop(struct byte_cntr *byte_cntr_data)
{
	if (!byte_cntr_data)
		return;

	etr_pcie_close_channel(byte_cntr_data);
	wake_up(&byte_cntr_data->pcie_wait_wq);

	mutex_lock(&byte_cntr_data->byte_cntr_lock);
	/* A threshold of 0 disables byte-counter interrupts. */
	coresight_csr_set_byte_cntr(byte_cntr_data->csr, 0);
	mutex_unlock(&byte_cntr_data->byte_cntr_lock);
}
EXPORT_SYMBOL(etr_pcie_stop);

static int tmc_etr_byte_cntr_release(struct inode *in, struct file *fp)
{
@@ -609,6 +656,167 @@ void usb_bypass_notifier(void *priv, unsigned int event,
}
EXPORT_SYMBOL(usb_bypass_notifier);

/*
 * MHI state-change callback for the QDSS output channel: schedule the
 * channel-open worker when the link connects and close the channel
 * when it disconnects.  All other events are ignored.
 */
static void etr_pcie_client_cb(struct mhi_dev_client_cb_data *cb_data)
{
	struct byte_cntr *byte_cntr_data;

	if (!cb_data)
		return;

	byte_cntr_data = cb_data->user_data;
	if (!byte_cntr_data)
		return;

	/* Events for any other channel are not ours to handle. */
	if (cb_data->channel != byte_cntr_data->pcie_out_chan)
		return;

	switch (cb_data->ctrl_info) {
	case MHI_STATE_CONNECTED:
		dev_dbg(&tmcdrvdata->csdev->dev, "PCIE out channel connected.\n");
		queue_work(byte_cntr_data->pcie_wq,
				&byte_cntr_data->pcie_open_work);
		break;
	case MHI_STATE_DISCONNECTED:
		dev_dbg(&tmcdrvdata->csdev->dev,
			"PCIE out channel disconnected.\n");
		etr_pcie_close_channel(byte_cntr_data);
		break;
	default:
		break;
	}
}

/*
 * Completion callback for an asynchronous MHI write: the request was
 * allocated in etr_pcie_write_work_fn() and ownership passed to the
 * MHI layer, so free it here.  kfree(NULL) is a no-op, so the previous
 * NULL check (and the unused local alias) were redundant.
 */
static void etr_pcie_write_complete_cb(void *req)
{
	kfree(req);
}

/*
 * Worker: open the MHI (PCIe) output channel and, on success, mark it
 * open under byte_cntr_lock so etr_pcie_start()/the write worker see a
 * consistent state.
 */
static void etr_pcie_open_work_fn(struct work_struct *work)
{
	int ret;
	struct byte_cntr *byte_cntr_data = container_of(work,
					      struct byte_cntr,
					      pcie_open_work);

	/*
	 * No NULL check on byte_cntr_data: container_of() on the
	 * embedded work_struct of a queued work item cannot be NULL,
	 * so the old check was dead code.
	 */
	ret = mhi_dev_open_channel(byte_cntr_data->pcie_out_chan,
			&byte_cntr_data->out_handle,
			NULL);
	if (ret < 0) {
		dev_err(&tmcdrvdata->csdev->dev, "%s: open pcie out channel fail %d\n",
						__func__, ret);
		return;
	}

	dev_dbg(&tmcdrvdata->csdev->dev,
			"Open pcie out channel successfully\n");
	mutex_lock(&byte_cntr_data->byte_cntr_lock);
	byte_cntr_data->pcie_chan_opened = true;
	mutex_unlock(&byte_cntr_data->byte_cntr_lock);
}

/*
 * Worker: drain the ETR DDR buffer to the PCIe (MHI) channel in
 * PCIE_BLK_SIZE chunks.  Runs for as long as the ETR is enabled with
 * out_mode == PCIE; sleeps until the byte-counter IRQ signals that
 * another block is available, or until stop/disable/close wakes it.
 */
static void etr_pcie_write_work_fn(struct work_struct *work)
{
	int ret = 0;
	struct mhi_req *req;
	size_t actual;
	int bytes_to_write;
	char *buf;

	struct byte_cntr *byte_cntr_data = container_of(work,
						struct byte_cntr,
						pcie_write_work);

	while (tmcdrvdata->enable
		&& tmcdrvdata->out_mode == TMC_ETR_OUT_MODE_PCIE) {
		/* Wait for data (irq_cnt > 0) or any teardown condition. */
		if (!atomic_read(&byte_cntr_data->irq_cnt)) {
			ret =  wait_event_interruptible(
				byte_cntr_data->pcie_wait_wq,
				atomic_read(&byte_cntr_data->irq_cnt) > 0
				|| !tmcdrvdata->enable
				|| tmcdrvdata->out_mode != TMC_ETR_OUT_MODE_PCIE
				|| !byte_cntr_data->pcie_chan_opened);
			/* Bail out on signal or if the session ended. */
			if (ret == -ERESTARTSYS || !tmcdrvdata->enable
			|| tmcdrvdata->out_mode != TMC_ETR_OUT_MODE_PCIE
			|| !byte_cntr_data->pcie_chan_opened)
				break;
		}

		actual = PCIE_BLK_SIZE;
		/* Current read position inside the circular ETR buffer. */
		buf = (char *)(tmcdrvdata->buf + byte_cntr_data->offset);
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			break;

		/* May shrink 'actual' and/or redirect 'buf' to the bytes
		 * actually available at this offset.
		 */
		tmc_etr_read_bytes(byte_cntr_data, (loff_t *)&byte_cntr_data->offset,
					PCIE_BLK_SIZE, &actual, &buf);

		/* NOTE(review): 'actual' is size_t, so 'actual <= 0' can
		 * only ever be true when actual == 0.
		 */
		if (actual <= 0) {
			kfree(req);
			req = NULL;
			break;
		}

		/* Async write: the request is freed by
		 * etr_pcie_write_complete_cb() on completion.
		 */
		req->buf = buf;
		req->client = byte_cntr_data->out_handle;
		req->context = byte_cntr_data;
		req->len = actual;
		req->chan = byte_cntr_data->pcie_out_chan;
		req->mode = DMA_ASYNC;
		req->client_cb = etr_pcie_write_complete_cb;
		req->snd_cmpl = 1;

		bytes_to_write = mhi_dev_write_channel(req);
		if (bytes_to_write != PCIE_BLK_SIZE) {
			dev_err(&tmcdrvdata->csdev->dev, "Write error %d\n",
							bytes_to_write);

			kfree(req);
			req = NULL;
			break;
		}

		/* Advance the circular-buffer offset, wrapping at the
		 * end of the ETR memory.
		 */
		mutex_lock(&byte_cntr_data->byte_cntr_lock);
		if (byte_cntr_data->offset + actual >= tmcdrvdata->size)
			byte_cntr_data->offset = 0;
		else
			byte_cntr_data->offset += actual;
		mutex_unlock(&byte_cntr_data->byte_cntr_lock);
	}
}

/*
 * Register for MHI state-change notifications on the QDSS output
 * channel; etr_pcie_client_cb() then opens/closes the channel as the
 * link connects and disconnects.
 */
int etr_register_pcie_channel(struct byte_cntr *byte_cntr_data)
{
	return mhi_register_state_cb(etr_pcie_client_cb, byte_cntr_data,
					byte_cntr_data->pcie_out_chan);
}

static int etr_pcie_init(struct byte_cntr *byte_cntr_data)
{
	if (!byte_cntr_data)
		return -EIO;

	byte_cntr_data->pcie_out_chan = MHI_CLIENT_QDSS_IN;
	byte_cntr_data->offset = 0;
	byte_cntr_data->pcie_chan_opened = false;
	INIT_WORK(&byte_cntr_data->pcie_open_work, etr_pcie_open_work_fn);
	INIT_WORK(&byte_cntr_data->pcie_write_work, etr_pcie_write_work_fn);
	init_waitqueue_head(&byte_cntr_data->pcie_wait_wq);
	byte_cntr_data->pcie_wq = create_singlethread_workqueue("etr_pcie");
	if (!byte_cntr_data->pcie_wq)
		return -ENOMEM;

	return etr_register_pcie_channel(byte_cntr_data);
}

static int usb_bypass_init(struct byte_cntr *byte_cntr_data)
{
@@ -673,6 +881,7 @@ struct byte_cntr *byte_cntr_init(struct amba_device *adev,
	init_waitqueue_head(&byte_cntr_data->wq);
	mutex_init(&byte_cntr_data->byte_cntr_lock);

	etr_pcie_init(byte_cntr_data);
	return byte_cntr_data;
}
EXPORT_SYMBOL(byte_cntr_init);
+12 −2
Original line number Diff line number Diff line
@@ -8,12 +8,13 @@
#include <linux/amba/bus.h>
#include <linux/wait.h>
#include <linux/mutex.h>

#include <linux/msm_mhi_dev.h>
struct byte_cntr {
	struct cdev		dev;
	struct class		*driver_class;
	bool			enable;
	bool			read_active;
	bool			pcie_chan_opened;
	bool			sw_usb;
	uint32_t		byte_cntr_value;
	uint32_t		block_size;
@@ -22,6 +23,7 @@ struct byte_cntr {
	atomic_t		usb_free_buf;
	wait_queue_head_t	wq;
	wait_queue_head_t	usb_wait_wq;
	wait_queue_head_t	pcie_wait_wq;
	struct workqueue_struct *usb_wq;
	struct qdss_request	*usb_req;
	struct work_struct	read_work;
@@ -29,6 +31,12 @@ struct byte_cntr {
	struct mutex		byte_cntr_lock;
	struct coresight_csr		*csr;
	unsigned long		offset;
	u32			pcie_out_chan;
	struct mhi_dev_client	*out_handle;
	struct work_struct	pcie_open_work;
	struct work_struct	pcie_write_work;
	struct workqueue_struct	*pcie_wq;
	void (*event_notifier)(struct mhi_dev_client_cb_reason *cb);
};

extern void usb_bypass_notifier(void *priv, unsigned int event,
@@ -36,5 +44,7 @@ extern void usb_bypass_notifier(void *priv, unsigned int event,
extern void tmc_etr_byte_cntr_start(struct byte_cntr *byte_cntr_data);
extern void tmc_etr_byte_cntr_stop(struct byte_cntr *byte_cntr_data);
extern void usb_bypass_stop(struct byte_cntr *byte_cntr_data);

extern int etr_register_pcie_channel(struct byte_cntr *byte_cntr_data);
extern int etr_pcie_start(struct byte_cntr *byte_cntr_data);
extern void etr_pcie_stop(struct byte_cntr *byte_cntr_data);
#endif
+260 −63
Original line number Diff line number Diff line
@@ -1087,14 +1087,38 @@ ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
static struct etr_buf *
tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
{
	struct etr_buf *sysfs_buf = NULL;

	/*
	 * If we are enabling the ETR from disabled state, we need to make
	 * sure we have a buffer with the right size. The etr_buf is not reset
	 * immediately after we stop the tracing in SYSFS mode as we wait for
	 * the user to collect the data. We may be able to reuse the existing
	 * buffer, provided the size matches. Any allocation has to be done
	 * with the lock released.
	 */
	sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
	if (!sysfs_buf || (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM
			&& sysfs_buf->size != drvdata->size)
		|| (drvdata->out_mode == TMC_ETR_OUT_MODE_USB
			&& drvdata->byte_cntr->sw_usb
			&& sysfs_buf->size != TMC_ETR_SW_USB_BUF_SIZE)
		|| (drvdata->out_mode == TMC_ETR_OUT_MODE_PCIE
			&& sysfs_buf->size != TMC_ETR_PCIE_MEM_SIZE)) {

		if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB
			&& drvdata->byte_cntr->sw_usb)
		return tmc_alloc_etr_buf(drvdata, TMC_ETR_SW_USB_BUF_SIZE,
			sysfs_buf = tmc_alloc_etr_buf(drvdata, TMC_ETR_SW_USB_BUF_SIZE,
					 0, cpu_to_node(0), NULL);
		else if (drvdata->out_mode == TMC_ETR_OUT_MODE_PCIE)
			sysfs_buf = tmc_alloc_etr_buf(drvdata, TMC_ETR_PCIE_MEM_SIZE,
					 0, cpu_to_node(0), NULL);
		else
		return tmc_alloc_etr_buf(drvdata, drvdata->size,
			sysfs_buf = tmc_alloc_etr_buf(drvdata, drvdata->size,
					 0, cpu_to_node(0), NULL);
	}
	return sysfs_buf;
}

static void
tmc_etr_free_sysfs_buf(struct etr_buf *buf)
@@ -1187,13 +1211,13 @@ static int tmc_etr_fill_usb_bam_data(struct tmc_drvdata *drvdata)
	return 0;
}

static void __tmc_etr_enable_to_bam(struct tmc_drvdata *drvdata)
static int __tmc_etr_enable_to_bam(struct tmc_drvdata *drvdata)
{
	struct tmc_etr_bam_data *bamdata = drvdata->bamdata;
	uint32_t axictl;

	if (drvdata->enable_to_bam)
		return;
		return 0;

	/* Configure and enable required CSR registers */
	msm_qdss_csr_enable_bam_to_usb(drvdata->csr);
@@ -1202,7 +1226,13 @@ static void __tmc_etr_enable_to_bam(struct tmc_drvdata *drvdata)

	CS_UNLOCK(drvdata->base);

	writel_relaxed(bamdata->data_fifo.size / 4, drvdata->base + TMC_RSZ);
	if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
		writel_relaxed(bamdata->data_fifo.size / 4,
			drvdata->base + TMC_RSZ);
	else if (drvdata->out_mode == TMC_ETR_OUT_MODE_PCIE)
		writel_relaxed(bamdata->connect.data.size / 4,
			drvdata->base + TMC_RSZ);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
@@ -1213,15 +1243,34 @@ static void __tmc_etr_enable_to_bam(struct tmc_drvdata *drvdata)
	axictl = (axictl & ~0x3) | 0x2;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);

	if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
		if (bamdata->props.options & SPS_BAM_SMMU_EN) {
			writel_relaxed((uint32_t)bamdata->data_fifo.iova,
			       drvdata->base + TMC_DBALO);
		writel_relaxed((((uint64_t)bamdata->data_fifo.iova) >> 32)
			writel_relaxed(
				(((uint64_t)bamdata->data_fifo.iova) >> 32)
				& 0xFF, drvdata->base + TMC_DBAHI);
		} else {
			writel_relaxed((uint32_t)bamdata->data_fifo.phys_base,
			       drvdata->base + TMC_DBALO);
		writel_relaxed((((uint64_t)bamdata->data_fifo.phys_base) >> 32)
			writel_relaxed(
				(((uint64_t)bamdata->data_fifo.phys_base) >> 32)
				& 0xFF, drvdata->base + TMC_DBAHI);
		}
	}

	if (drvdata->out_mode == TMC_ETR_OUT_MODE_PCIE) {
		if (bamdata->props.options & SPS_BAM_SMMU_EN) {
			CS_LOCK(drvdata->base);
			dev_err(&drvdata->csdev->dev,
				"PCIE mode doesn't support smmu.\n");
			return -EINVAL;
		}

		writel_relaxed((uint32_t)bamdata->connect.data.phys_base,
			drvdata->base + TMC_DBALO);
		writel_relaxed(
			(((uint64_t)bamdata->connect.data.phys_base) >> 32)
			& 0xFF, drvdata->base + TMC_DBAHI);
	}
	/* Set FOnFlIn for periodic flush */
@@ -1233,6 +1282,7 @@ static void __tmc_etr_enable_to_bam(struct tmc_drvdata *drvdata)

	msm_qdss_csr_enable_flush(drvdata->csr);
	drvdata->enable_to_bam = true;
	return 0;
}

static int get_usb_bam_iova(struct device *dev, unsigned long usb_bam_handle,
@@ -1282,13 +1332,14 @@ static int tmc_etr_bam_enable(struct tmc_drvdata *drvdata)
	bamdata->connect.source = bamdata->handle;
	bamdata->connect.event_thresh = 0x4;
	bamdata->connect.src_pipe_index = TMC_ETR_BAM_PIPE_INDEX;

	if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
		bamdata->connect.options = SPS_O_AUTO_ENABLE;

		bamdata->connect.destination = bamdata->dest;
		bamdata->connect.dest_pipe_index = bamdata->dest_pipe_idx;
		bamdata->connect.desc = bamdata->desc_fifo;
		bamdata->connect.data = bamdata->data_fifo;

		if (bamdata->props.options & SPS_BAM_SMMU_EN) {
			ret = get_usb_bam_iova(drvdata->csdev->dev.parent,
				bamdata->dest, &iova);
@@ -1296,6 +1347,44 @@ static int tmc_etr_bam_enable(struct tmc_drvdata *drvdata)
				goto err1;
			bamdata->connect.dest_iova = iova;
		}
	} else if (drvdata->out_mode == TMC_ETR_OUT_MODE_PCIE) {
		if (bamdata->props.options & SPS_BAM_SMMU_EN) {
			dev_err(&drvdata->csdev->dev,
				"PCIE mode doesn't support smmu.\n");
			ret = -EINVAL;
			goto err1;
		}

		bamdata->connect.options = SPS_O_AUTO_ENABLE | SPS_O_DUMMY_PEER;

		bamdata->connect.destination =
			drvdata->ipa_data->ipa_qdss_out.ipa_rx_db_pa;
		bamdata->connect.dest_pipe_index = 0;
		bamdata->connect.desc.phys_base =
			drvdata->ipa_data->ipa_qdss_in.desc_fifo_base_addr;
		bamdata->connect.desc.size =
			drvdata->ipa_data->ipa_qdss_in.desc_fifo_size;
		bamdata->connect.desc.base =
			ioremap(bamdata->connect.desc.phys_base,
			bamdata->connect.desc.size);
		if (!bamdata->connect.desc.base) {
			ret = -ENOMEM;
			goto err1;
		}

		bamdata->connect.data.phys_base =
			drvdata->ipa_data->ipa_qdss_in.data_fifo_base_addr;
		bamdata->connect.data.size =
			drvdata->ipa_data->ipa_qdss_in.data_fifo_size;
		bamdata->connect.data.base =
			ioremap(bamdata->connect.data.phys_base,
			bamdata->connect.data.size);
		if (!bamdata->connect.data.base) {
			ret = -ENOMEM;
			goto err1;
		}
	}

	ret = sps_connect(bamdata->pipe, &bamdata->connect);
	if (ret)
		goto err1;
@@ -1436,6 +1525,77 @@ int tmc_etr_bam_init(struct amba_device *adev,
	return sps_register_bam_device(&bamdata->props, &bamdata->handle);
}

/*
 * Parse the IPA connection parameters for the QDSS hardware path from
 * the device tree and populate drvdata->ipa_data (devm-allocated, so
 * freed automatically on driver detach).
 *
 * Required DT properties: ipa-conn-data-base-pa, ipa-conn-data-size,
 * ipa-conn-desc-base-pa, ipa-conn-desc-size, ipa-peer-evt-reg-pa.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL when a
 * required property is missing.
 */
int tmc_etr_ipa_init(struct amba_device *adev,
			struct tmc_drvdata *drvdata)
{
	int ret;
	struct device *dev = &adev->dev;
	struct device_node *node = adev->dev.of_node;
	struct tmc_etr_ipa_data *ipa_data;
	u32 value = 0;

	ipa_data = devm_kzalloc(dev, sizeof(*ipa_data), GFP_KERNEL);
	if (!ipa_data)
		return -ENOMEM;

	drvdata->ipa_data = ipa_data;

	/* Physical base and size of the data FIFO shared with IPA. */
	ret = of_property_read_u32(node, "ipa-conn-data-base-pa", &value);
	if (ret) {
		pr_err("%s: Invalid ipa data base address property\n",
			__func__);
		return -EINVAL;
	}
	ipa_data->ipa_qdss_in.data_fifo_base_addr = value;

	ret = of_property_read_u32(node, "ipa-conn-data-size", &value);
	if (ret) {
		pr_err("%s: Invalid ipa data base size\n", __func__);
		return  -EINVAL;
	}
	ipa_data->ipa_qdss_in.data_fifo_size = value;

	/* Physical base and size of the descriptor FIFO. */
	ret = of_property_read_u32(node, "ipa-conn-desc-base-pa", &value);
	if (ret) {
		pr_err("%s: Invalid ipa desc base address property\n",
			__func__);
		return  -EINVAL;
	}
	ipa_data->ipa_qdss_in.desc_fifo_base_addr = value;

	ret = of_property_read_u32(node, "ipa-conn-desc-size", &value);
	if (ret) {
		pr_err("%s: Invalid ipa desc size  property\n", __func__);
		return -EINVAL;
	}
	ipa_data->ipa_qdss_in.desc_fifo_size = value;

	/* Physical address of the peer (BAM) event register. */
	ret = of_property_read_u32(node, "ipa-peer-evt-reg-pa", &value);
	if (ret) {
		pr_err("%s: Invalid ipa peer reg pa property\n", __func__);
		return -EINVAL;
	}
	ipa_data->ipa_qdss_in.bam_p_evt_dest_addr = value;

	/* Fixed connection parameters for the QDSS pipe. */
	ipa_data->ipa_qdss_in.bam_p_evt_threshold = 0x4;
	ipa_data->ipa_qdss_in.override_eot = 0x1;
	return 0;
}

static int tmc_etr_ipa_conn(struct tmc_drvdata *drvdata)
{
	if (!drvdata->ipa_data)
		return -ENOMEM;

	return ipa_qdss_conn_pipes(&drvdata->ipa_data->ipa_qdss_in,
			&drvdata->ipa_data->ipa_qdss_out);
}

/* Tear down the QDSS-to-IPA pipes set up by tmc_etr_ipa_conn(). */
static int tmc_etr_ipa_disconn(void)
{
	return ipa_qdss_disconn_pipes();
}

static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
@@ -1461,23 +1621,10 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)

	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM
		|| (drvdata->out_mode == TMC_ETR_OUT_MODE_USB
			&& drvdata->byte_cntr->sw_usb)) {
		/*
		 * If we are enabling the ETR from disabled state, we need to make
		 * sure we have a buffer with the right size. The etr_buf is not reset
		 * immediately after we stop the tracing in SYSFS mode as we wait for
		 * the user to collect the data. We may be able to reuse the existing
		 * buffer, provided the size matches. Any allocation has to be done
		 * with the lock released.
		 */
		sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
		if (!sysfs_buf || (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM
				&& sysfs_buf->size != drvdata->size)
				|| (drvdata->out_mode == TMC_ETR_OUT_MODE_USB
				&& drvdata->byte_cntr->sw_usb
				&&  sysfs_buf->size != TMC_ETR_SW_USB_BUF_SIZE)) {
			&& drvdata->byte_cntr->sw_usb)
		|| (drvdata->out_mode == TMC_ETR_OUT_MODE_PCIE &&
			drvdata->pcie_path == TMC_ETR_PCIE_SW_PATH)) {

			spin_unlock_irqrestore(&drvdata->spinlock, flags);
		/*
		 * ETR DDR memory is not allocated until user enables
		 * tmc at least once. If user specifies different ETR
@@ -1487,11 +1634,11 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
		 * next tmc enable session.
		 */
		/* Allocate memory with the locks released */
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		new_buf = tmc_etr_setup_sysfs_buf(drvdata);
		if (IS_ERR(new_buf))
			return -ENOMEM;
		spin_lock_irqsave(&drvdata->spinlock, flags);
		}

		/*
		 * If we don't have a buffer or it doesn't match the requested size,
@@ -1506,6 +1653,29 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
			goto unlock_out;
	}

	if (drvdata->out_mode == TMC_ETR_OUT_MODE_PCIE &&
			drvdata->pcie_path == TMC_ETR_PCIE_HW_PATH) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		ret = tmc_etr_ipa_conn(drvdata);
		if (ret)
			return ret;

		ret = tmc_etr_bam_enable(drvdata);
		if (ret) {
			tmc_etr_ipa_disconn();
			return ret;
		}

		spin_lock_irqsave(&drvdata->spinlock, flags);
		ret = __tmc_etr_enable_to_bam(drvdata);
		if (ret) {
			spin_unlock_irqrestore(&drvdata->spinlock,
				flags);
			tmc_etr_ipa_disconn();
			return ret;
		}
	}

	drvdata->mode = CS_MODE_SYSFS;
	drvdata->enable = true;

@@ -1534,6 +1704,12 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)

	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
		tmc_etr_byte_cntr_start(drvdata->byte_cntr);
	if (drvdata->out_mode == TMC_ETR_OUT_MODE_PCIE)
		etr_pcie_start(drvdata->byte_cntr);

	if (drvdata->out_mode == TMC_ETR_OUT_MODE_PCIE
			&& drvdata->pcie_path == TMC_ETR_PCIE_SW_PATH)
		etr_pcie_start(drvdata->byte_cntr);

	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM ||
		(drvdata->byte_cntr->sw_usb &&
@@ -2035,6 +2211,13 @@ static int _tmc_disable_etr_sink(struct coresight_device *csdev,
				spin_lock_irqsave(&drvdata->spinlock, flags);
				tmc_etr_disable_hw(drvdata);
			}
		} else if (drvdata->out_mode == TMC_ETR_OUT_MODE_PCIE
			&& drvdata->pcie_path == TMC_ETR_PCIE_HW_PATH) {
			__tmc_etr_disable_to_bam(drvdata);
			spin_unlock_irqrestore(&drvdata->spinlock, flags);
			tmc_etr_bam_disable(drvdata);
			tmc_etr_ipa_disconn();
			goto out;
		} else {
			tmc_etr_disable_hw(drvdata);
		}
@@ -2048,14 +2231,20 @@ static int _tmc_disable_etr_sink(struct coresight_device *csdev,

	if ((drvdata->out_mode == TMC_ETR_OUT_MODE_USB
		&& drvdata->byte_cntr->sw_usb)
		|| drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
		if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
			tmc_etr_byte_cntr_stop(drvdata->byte_cntr);
		else {
		|| drvdata->out_mode == TMC_ETR_OUT_MODE_MEM
		|| (drvdata->out_mode == TMC_ETR_OUT_MODE_PCIE
		&& drvdata->pcie_path == TMC_ETR_PCIE_SW_PATH)) {

		if (drvdata->out_mode == TMC_ETR_OUT_MODE_PCIE) {
			etr_pcie_stop(drvdata->byte_cntr);
			flush_workqueue(drvdata->byte_cntr->pcie_wq);
		} else if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
			usb_bypass_stop(drvdata->byte_cntr);
			flush_workqueue(drvdata->byte_cntr->usb_wq);
			drvdata->usbch = NULL;
		}
		} else
			tmc_etr_byte_cntr_stop(drvdata->byte_cntr);

		coresight_cti_unmap_trigin(drvdata->cti_reset,
				drvdata->cti_reset_trig_num, 0);
		coresight_cti_unmap_trigout(drvdata->cti_flush,
@@ -2086,6 +2275,8 @@ int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode)
		new_mode = TMC_ETR_OUT_MODE_MEM;
	else if (!strcmp(out_mode, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_USB]))
		new_mode = TMC_ETR_OUT_MODE_USB;
	else if (!strcmp(out_mode, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_PCIE]))
		new_mode = TMC_ETR_OUT_MODE_PCIE;
	else {
		mutex_unlock(&drvdata->mem_lock);
		return -EINVAL;
@@ -2152,6 +2343,12 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
		goto out;
	}

	if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB ||
		drvdata->out_mode == TMC_ETR_OUT_MODE_PCIE) {
		ret = -EINVAL;
		goto out;
	}

	if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
		ret = -EINVAL;
		goto out;
+49 −0
Original line number Diff line number Diff line
@@ -443,6 +443,47 @@ static ssize_t available_out_modes_show(struct device *dev,
}
static DEVICE_ATTR_RO(available_out_modes);

/* sysfs read: report the selected ETR-to-PCIe path ("sw" or "hw"). */
static ssize_t pcie_path_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
	const char *path = str_tmc_etr_pcie_path[drvdata->pcie_path];

	return scnprintf(buf, PAGE_SIZE, "%s\n", path);
}

/*
 * sysfs write: select the ETR-to-PCIe path, "sw" or "hw".  The switch
 * is rejected with -EINVAL while the ETR is enabled.
 */
static ssize_t pcie_path_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
	char str[10] = "";

	if (strlen(buf) >= 10)
		return -EINVAL;
	/*
	 * Field width must be 9, not 10: "%10s" may store up to 10
	 * characters plus the terminating NUL, overflowing str[10].
	 */
	if (sscanf(buf, "%9s", str) != 1)
		return -EINVAL;

	mutex_lock(&drvdata->mem_lock);
	if (drvdata->enable) {
		mutex_unlock(&drvdata->mem_lock);
		pr_err("ETR is in use, disable it to switch the pcie path\n");
		return -EINVAL;
	}

	if (!strcmp(str, str_tmc_etr_pcie_path[TMC_ETR_PCIE_SW_PATH]))
		drvdata->pcie_path = TMC_ETR_PCIE_SW_PATH;
	else if (!strcmp(str, str_tmc_etr_pcie_path[TMC_ETR_PCIE_HW_PATH]))
		drvdata->pcie_path = TMC_ETR_PCIE_HW_PATH;
	else
		size = -EINVAL;

	mutex_unlock(&drvdata->mem_lock);
	return size;
}
static DEVICE_ATTR_RW(pcie_path);


static struct attribute *coresight_tmc_etf_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	NULL,
@@ -454,6 +495,7 @@ static struct attribute *coresight_tmc_etr_attrs[] = {
	&dev_attr_block_size.attr,
	&dev_attr_out_mode.attr,
	&dev_attr_available_out_modes.attr,
	&dev_attr_pcie_path.attr,
	NULL,
};

@@ -586,6 +628,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
		 drvdata->pcie_path = TMC_ETR_PCIE_HW_PATH;
		drvdata->size = tmc_etr_get_default_buffer_size(dev);
	} else {
		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
@@ -655,6 +698,12 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
			goto out;
		idr_init(&drvdata->idr);
		mutex_init(&drvdata->idr_mutex);
		if (of_property_read_bool(dev->of_node,
			"qcom,qdss-ipa-support")) {
			ret = tmc_etr_ipa_init(adev, drvdata);
			if (ret)
				goto out;
		}
		dev_list = &etr_devs;
		break;
	case TMC_CONFIG_TYPE_ETF:
+23 −1
Original line number Diff line number Diff line
@@ -20,6 +20,7 @@
#include <linux/usb_bam.h>
#include <linux/msm-sps.h>
#include <linux/usb/usb_qdss.h>
#include <linux/ipa_qdss.h>

#include "coresight-byte-cntr.h"

@@ -103,6 +104,8 @@
#define TMC_ETR_BAM_PIPE_INDEX	0
#define TMC_ETR_BAM_NR_PIPES	2

#define TMC_ETR_PCIE_MEM_SIZE	0x400000

#define TMC_AUTH_NSID_MASK	GENMASK(1, 0)

enum tmc_config_type {
@@ -148,18 +151,33 @@ enum etr_mode {
	ETR_MODE_CATU,		/* Use SG mechanism in CATU */
};

/*
 * How ETR trace data reaches PCIe: via the software byte-counter path
 * or the BAM-to-IPA hardware path.
 */
enum tmc_etr_pcie_path {
	TMC_ETR_PCIE_SW_PATH,
	TMC_ETR_PCIE_HW_PATH,
};

/* sysfs strings for the pcie_path attribute. */
static const char * const str_tmc_etr_pcie_path[] = {
	[TMC_ETR_PCIE_SW_PATH]	= "sw",
	[TMC_ETR_PCIE_HW_PATH]	= "hw",
};

/* Destination of the ETR trace output. */
enum tmc_etr_out_mode {
	TMC_ETR_OUT_MODE_NONE,
	TMC_ETR_OUT_MODE_MEM,
	TMC_ETR_OUT_MODE_USB,
	TMC_ETR_OUT_MODE_PCIE,
};

/* sysfs strings for the out_mode attribute. */
static const char * const str_tmc_etr_out_mode[] = {
	[TMC_ETR_OUT_MODE_NONE]		= "none",
	[TMC_ETR_OUT_MODE_MEM]		= "mem",
	[TMC_ETR_OUT_MODE_USB]		= "usb",
	[TMC_ETR_OUT_MODE_PCIE]		= "pcie",
};
/* IPA connection parameters exchanged with the IPA QDSS driver. */
struct tmc_etr_ipa_data {
	struct ipa_qdss_conn_out_params ipa_qdss_out;
	struct ipa_qdss_conn_in_params  ipa_qdss_in;
};

struct tmc_etr_bam_data {
	struct sps_bam_props	props;
	unsigned long		handle;
@@ -263,6 +281,8 @@ struct tmc_drvdata {
	u32			cti_flush_trig_num;
	u32			cti_reset_trig_num;
	enum tmc_etr_out_mode	out_mode;
	enum tmc_etr_pcie_path	pcie_path;
	struct tmc_etr_ipa_data	*ipa_data;
};

struct etr_buf_operations {
@@ -331,6 +351,8 @@ void usb_notifier(void *priv, unsigned int event, struct qdss_request *d_req,
		  struct usb_qdss_ch *ch);
int tmc_etr_bam_init(struct amba_device *adev,
		     struct tmc_drvdata *drvdata);
int tmc_etr_ipa_init(struct amba_device *adev,
			struct tmc_drvdata *drvdata);
extern struct byte_cntr *byte_cntr_init(struct amba_device *adev,
					struct tmc_drvdata *drvdata);
int tmc_etr_enable_hw(struct tmc_drvdata *drvdata, struct etr_buf *etr_buf);