Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1e2a5053 authored by Sagar Gore
Browse files

msm: camera: Enable more than one composite stats



Currently only one composite group of stats can be configured.
However, if the target supports it, logic was added to form multiple composite masks.
Userspace will send composite group id along with stats stream info,
kernel will create composite masks and write to registers.

Change-Id: Iabeb553470e78252fdfaf280b31305be9698af50
Signed-off-by: Sagar Gore <sgore@codeaurora.org>
parent c35ae825
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -45,6 +45,7 @@

#define VFE_MAX_CFG_TIMEOUT 3000
#define VFE_CLK_INFO_MAX 16
#define STATS_COMP_BIT_MASK 0xFF0000

struct vfe_device;
struct msm_vfe_axi_stream;
@@ -375,7 +376,7 @@ struct msm_vfe_stats_stream {
/*
 * Per-VFE shared stats state.
 *
 * NOTE: the diff rendering merged the removed pre-commit member
 * (scalar atomic_t stats_comp_mask) with the added post-commit array,
 * leaving a duplicate member name — invalid C. This resolves to the
 * post-commit layout: one composite mask per hardware comp group.
 */
struct msm_vfe_stats_shared_data {
	struct msm_vfe_stats_stream stream_info[MSM_ISP_STATS_MAX];
	uint8_t num_active_stream;
	/* software copy of each composite group's stats bit mask */
	atomic_t stats_comp_mask[MAX_NUM_STATS_COMP_MASK];
	uint16_t stream_handle_cnt;
	atomic_t stats_update;
};
+49 −7
Original line number Diff line number Diff line
@@ -1193,13 +1193,55 @@ static int msm_vfe40_stats_check_streams(
/*
 * Configure (or clear) a stats composite mask group on VFE4.0.
 *
 * The commit diff was rendered without +/- markers, so the removed
 * single-group body was merged in front of the new multi-group body
 * (duplicate logic, declarations after statements — invalid C).
 * This resolves the span to the post-commit implementation.
 *
 * @vfe_dev:    VFE device whose comp-mask register (base + 0x44) is updated.
 * @stats_mask: bit mask of stats types (low 8 bits used) to add/remove.
 * @enable:     non-zero to program the mask into a free composite group,
 *              zero to remove it from the group that currently holds it.
 *
 * On enable: the first composite group whose register field is empty
 * receives the mask; on disable: only the group that actually contains
 * the mask is cleared. In both cases the function programs exactly one
 * group and returns.
 */
static void msm_vfe40_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
	uint32_t stats_mask, uint8_t enable)
{
	uint32_t reg_mask, comp_stats_mask;
	uint32_t i = 0;
	atomic_t *stats_comp;
	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;

	/* VFE4.0 has 8 stats types; keep only their bits */
	stats_mask = stats_mask & 0xFF;

	if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask >
			MAX_NUM_STATS_COMP_MASK) {
		pr_err("%s: num of comp masks %d exceed max %d\n",
			__func__,
			vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask,
			MAX_NUM_STATS_COMP_MASK);
		return;
	}

	for (i = 0;
		i < vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask; i++) {

		reg_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x44);
		/* this group's 8-bit field sits at bits [16+8i .. 23+8i] */
		comp_stats_mask = reg_mask & (STATS_COMP_BIT_MASK << (i*8));
		stats_comp = &stats_data->stats_comp_mask[i];

		if (enable) {
			/* group already in use — try the next one */
			if (comp_stats_mask)
				continue;

			reg_mask |= (stats_mask << (16 + i*8));
			atomic_add(stats_mask, stats_comp);
		} else {
			/*
			 * Check if comp mask in reg is valid
			 * and contains this stat
			 */
			if (!comp_stats_mask ||
				!((comp_stats_mask >> (16 + i*8)) &
					stats_mask))
				continue;

			atomic_sub(stats_mask, stats_comp);
			reg_mask &= ~(stats_mask << (16 + i*8));
		}
		ISP_DBG("%s: comp_mask: %x atomic stats[0]: %x %x\n",
			__func__, reg_mask,
			atomic_read(&stats_data->stats_comp_mask[0]),
			atomic_read(&stats_data->stats_comp_mask[1]));
		msm_camera_io_w(reg_mask, vfe_dev->vfe_base + 0x44);
		return;
	}
}

static void msm_vfe40_stats_cfg_wm_irq_mask(
+62 −10
Original line number Diff line number Diff line
@@ -56,6 +56,8 @@
/*
 * Per-stats-type ping/pong status bit positions (9 entries, matching
 * VFE44_NUM_STATS_TYPE). Presumably indexed by the stats type enum,
 * with entry 0 being BF_SCALE — TODO confirm against the stats enum.
 */
static uint8_t stats_pingpong_offset_map[] = {
	7, 8, 9, 10, 11, 12, 13, 14, 15};

#define SHIFT_BF_SCALE_BIT 1
#define VFE44_NUM_STATS_COMP 2
#define VFE44_NUM_STATS_TYPE 9
#define VFE44_STATS_BASE(idx) \
	((idx) == STATS_IDX_BF_SCALE ? 0xA0C : (0x168 + 0x18 * (idx-1)))
@@ -1048,15 +1050,65 @@ static void msm_vfe44_stats_cfg_comp_mask(
	struct vfe_device *vfe_dev,
	uint32_t stats_mask, uint8_t enable)
{
	uint32_t comp_mask;
	/* BF scale is controlled by BF also */
	stats_mask = (stats_mask >> 1) & 0xFF;
	comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x44) >> 16;
	if (enable)
		comp_mask |= stats_mask;
	else
		comp_mask &= ~stats_mask;
	msm_camera_io_w(comp_mask << 16, vfe_dev->vfe_base + 0x44);
	uint32_t reg_mask, comp_stats_mask, mask_bf_scale;
	uint32_t i = 0;
	atomic_t *stats_comp;
	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;

	if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask >
			MAX_NUM_STATS_COMP_MASK) {
		pr_err("%s: num of comp masks %d exceed max %d\n",
			__func__,
			vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask,
			MAX_NUM_STATS_COMP_MASK);
		return;
	}

	/* BF scale is controlled by BF also so ignore bit 0 of BF scale */
	stats_mask = stats_mask & 0x1FF;
	mask_bf_scale = stats_mask >> SHIFT_BF_SCALE_BIT;

	for (i = 0;
		i < vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask; i++) {
		stats_comp = &stats_data->stats_comp_mask[i];
		reg_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x44);
		comp_stats_mask = reg_mask & (STATS_COMP_BIT_MASK << (i*8));

		if (enable) {
			if (comp_stats_mask)
				continue;

			reg_mask |= (mask_bf_scale << (16 + i*8));
			atomic_add(stats_mask, stats_comp);
		} else {

			if (stats_mask & (1 << STATS_IDX_BF_SCALE) &&
				atomic_read(stats_comp) &
					(1 << STATS_IDX_BF_SCALE))
				atomic_sub((1 << STATS_IDX_BF_SCALE),
					stats_comp);

			/*
			 * Check if comp mask in reg is valid
			 * and contains this stat
			 */

			if (!comp_stats_mask ||
				!((comp_stats_mask >> (16 + i*8)) &
					mask_bf_scale))
				continue;

			atomic_sub(stats_mask, stats_comp);
			reg_mask &= ~(mask_bf_scale << (16 + i*8));
		}
		ISP_DBG("%s: comp_mask: %x atomic stats[0]: %x %x\n",
			__func__, reg_mask,
			atomic_read(&stats_data->stats_comp_mask[0]),
			atomic_read(&stats_data->stats_comp_mask[1]));

		msm_camera_io_w(reg_mask, vfe_dev->vfe_base + 0x44);
		return;
	}
}

static void msm_vfe44_stats_cfg_wm_irq_mask(
@@ -1297,7 +1349,7 @@ static struct msm_vfe_stats_hardware_info msm_vfe44_stats_hw_info = {
		1 << MSM_ISP_STATS_BF_SCALE,
	.stats_ping_pong_offset = stats_pingpong_offset_map,
	.num_stats_type = VFE44_NUM_STATS_TYPE,
	.num_stats_comp_mask = 2,
	.num_stats_comp_mask = VFE44_NUM_STATS_COMP,
};

static struct v4l2_subdev_core_ops msm_vfe44_subdev_core_ops = {
+133 −67
Original line number Diff line number Diff line
@@ -71,14 +71,16 @@ void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
	uint32_t irq_status0, uint32_t irq_status1,
	struct msm_isp_timestamp *ts)
{
	int i, rc;
	int i, j, rc;
	struct msm_isp_event_data buf_event;
	struct msm_isp_stats_event *stats_event = &buf_event.u.stats;
	struct msm_isp_buffer *done_buf;
	struct msm_vfe_stats_stream *stream_info = NULL;
	uint32_t pingpong_status;
	uint32_t comp_stats_type_mask = 0;
	uint32_t comp_stats_type_mask = 0, atomic_stats_mask = 0;
	uint32_t stats_comp_mask = 0, stats_irq_mask = 0;
	uint32_t num_stats_comp_mask =
		vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
	stats_comp_mask = vfe_dev->hw_info->vfe_ops.stats_ops.
		get_comp_mask(irq_status0, irq_status1);
	stats_irq_mask = vfe_dev->hw_info->vfe_ops.stats_ops.
@@ -87,13 +89,30 @@ void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
		return;
	ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0);

	if (!stats_comp_mask)
		stats_irq_mask &=
			~atomic_read(&vfe_dev->stats_data.stats_comp_mask);
	else
		stats_irq_mask |=
			atomic_read(&vfe_dev->stats_data.stats_comp_mask);
	/*
	 * If any of composite mask is set, clear irq bits from mask,
	 * they will be restored by comp mask
	 */
	if (stats_comp_mask) {
		for (j = 0; j < num_stats_comp_mask; j++) {
			stats_irq_mask &= ~atomic_read(
				&vfe_dev->stats_data.stats_comp_mask[j]);
		}
	}

	for (j = 0; j < num_stats_comp_mask; j++) {
		atomic_stats_mask = atomic_read(
			&vfe_dev->stats_data.stats_comp_mask[j]);
		if (!stats_comp_mask) {
			stats_irq_mask &= ~atomic_stats_mask;
		} else {
			/* restore irq bits from composite mask */
			if (stats_comp_mask & (1 << j))
				stats_irq_mask |= atomic_stats_mask;
		}
		/* if no irq bits set from this composite mask continue*/
		if (!stats_irq_mask)
			continue;
		memset(&buf_event, 0, sizeof(struct msm_isp_event_data));
		buf_event.timestamp = ts->event_time;
		buf_event.frame_id =
@@ -102,7 +121,8 @@ void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
		pingpong_status = vfe_dev->hw_info->
			vfe_ops.stats_ops.get_pingpong_status(vfe_dev);

	for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
		for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type;
			i++) {
			if (!(stats_irq_mask & (1 << i)))
				continue;
			stream_info = &vfe_dev->stats_data.stream_info[i];
@@ -110,36 +130,44 @@ void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
			msm_isp_stats_cfg_ping_pong_address(vfe_dev,
				stream_info, pingpong_status, &done_buf);
			if (done_buf) {
			rc = vfe_dev->buf_mgr->ops->buf_divert(vfe_dev->buf_mgr,
				done_buf->bufq_handle, done_buf->buf_idx,
				&ts->buf_time, vfe_dev->axi_data.
				rc = vfe_dev->buf_mgr->ops->buf_divert(
					vfe_dev->buf_mgr, done_buf->bufq_handle,
					done_buf->buf_idx, &ts->buf_time,
					vfe_dev->axi_data.
					src_info[VFE_PIX_0].frame_id);
				if (rc != 0)
					continue;

			stats_event->stats_buf_idxs[stream_info->stats_type] =
				stats_event->stats_buf_idxs
					[stream_info->stats_type] =
					done_buf->buf_idx;
				if (!stream_info->composite_flag) {
					stats_event->stats_mask =
						1 << stream_info->stats_type;
				ISP_DBG("%s: stats event frame id: 0x%x\n",
					__func__, buf_event.frame_id);
					ISP_DBG("%s: stats frameid: 0x%x %d\n",
						__func__, buf_event.frame_id,
						stream_info->stats_type);
					msm_isp_send_event(vfe_dev,
						ISP_EVENT_STATS_NOTIFY +
					stream_info->stats_type, &buf_event);
						stream_info->stats_type,
						&buf_event);
				} else {
					comp_stats_type_mask |=
						1 << stream_info->stats_type;
				}
			}
			stats_irq_mask &= ~(1 << i);
		}

		if (comp_stats_type_mask) {
		ISP_DBG("%s: composite stats event frame id: 0x%x mask: 0x%x\n",
			__func__, buf_event.frame_id, comp_stats_type_mask);
			ISP_DBG("%s: comp_stats frameid: 0x%x, 0x%x\n",
				__func__, buf_event.frame_id,
				comp_stats_type_mask);
			stats_event->stats_mask = comp_stats_type_mask;
			msm_isp_send_event(vfe_dev,
				ISP_EVENT_COMP_STATS_NOTIFY, &buf_event);
			comp_stats_type_mask = 0;
		}
	}
}

@@ -346,12 +374,6 @@ void msm_isp_stats_stream_update(struct vfe_device *vfe_dev)
			stats_data->stream_info[i].state == STATS_STOPPING) {
			if (stats_data->stream_info[i].composite_flag)
				comp_stats_mask |= i;
			if (stats_data->stream_info[i].state == STATS_STARTING)
				atomic_add(BIT(i),
					&stats_data->stats_comp_mask);
			else
				atomic_sub(BIT(i),
					&stats_data->stats_comp_mask);
			stats_data->stream_info[i].state =
				stats_data->stream_info[i].state ==
				STATS_STARTING ? STATS_ACTIVE : STATS_INACTIVE;
@@ -383,9 +405,14 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
{
	int i, rc = 0;
	uint32_t stats_mask = 0, comp_stats_mask = 0, idx;
	uint32_t stats_mask = 0, idx;
	uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
	uint32_t num_stats_comp_mask = 0;
	struct msm_vfe_stats_stream *stream_info;
	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;

	num_stats_comp_mask =
		vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
	rc = vfe_dev->hw_info->vfe_ops.stats_ops.check_streams(
		stats_data->stream_info);
	if (rc < 0)
@@ -405,6 +432,13 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
				__func__, stream_cfg_cmd->stream_handle[i]);
			continue;
		}

		if (stream_info->composite_flag > num_stats_comp_mask) {
			pr_err("%s: comp grp %d exceed max %d\n",
				__func__, stream_info->composite_flag,
				num_stats_comp_mask);
			return -EINVAL;
		}
		rc = msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
		if (rc < 0) {
			pr_err("%s: No buffer for stream%d\n", __func__, idx);
@@ -418,17 +452,27 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,

		stats_data->num_active_stream++;
		stats_mask |= 1 << idx;
		if (stream_info->composite_flag)
			comp_stats_mask |= 1 << idx;

		if (stream_info->composite_flag > 0)
			comp_stats_mask[stream_info->composite_flag-1] |=
				1 << idx;

		ISP_DBG("%s: stats_mask %x %x active streams %d\n",
			__func__, comp_stats_mask[0],
			comp_stats_mask[1],
			stats_data->num_active_stream);

	}

	if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
		rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
	} else {
		vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
			vfe_dev, stats_mask, stream_cfg_cmd->enable);
		atomic_add(comp_stats_mask, &stats_data->stats_comp_mask);
		for (i = 0; i < num_stats_comp_mask; i++) {
			vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
		   vfe_dev, comp_stats_mask, 1);
			 vfe_dev, comp_stats_mask[i], 1);
		}
	}
	return rc;
}
@@ -437,10 +481,16 @@ static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
{
	int i, rc = 0;
	uint32_t stats_mask = 0, comp_stats_mask = 0, idx;
	uint32_t stats_mask = 0, idx;
	uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
	uint32_t num_stats_comp_mask = 0;
	struct msm_vfe_stats_stream *stream_info;
	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
	num_stats_comp_mask =
		vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;

	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {

		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);

		if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
@@ -456,6 +506,13 @@ static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
			continue;
		}

		if (stream_info->composite_flag > num_stats_comp_mask) {
			pr_err("%s: comp grp %d exceed max %d\n",
				__func__, stream_info->composite_flag,
				num_stats_comp_mask);
			return -EINVAL;
		}

		if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
			stream_info->state = STATS_STOP_PENDING;
		else
@@ -463,17 +520,26 @@ static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,

		stats_data->num_active_stream--;
		stats_mask |= 1 << idx;
		if (stream_info->composite_flag)
			comp_stats_mask |= 1 << idx;

		if (stream_info->composite_flag > 0)
			comp_stats_mask[stream_info->composite_flag-1] |=
				1 << idx;

		ISP_DBG("%s: stats_mask %x %x active streams %d\n",
			__func__, comp_stats_mask[0],
			comp_stats_mask[1],
			stats_data->num_active_stream);
	}

	if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
		rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
	} else {
		vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
			vfe_dev, stats_mask, stream_cfg_cmd->enable);
		atomic_sub(comp_stats_mask, &stats_data->stats_comp_mask);
		for (i = 0; i < num_stats_comp_mask; i++) {
			vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
		   vfe_dev, comp_stats_mask, 0);
			   vfe_dev, comp_stats_mask[i], 0);
		}
	}

	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+7 −0
Original line number Diff line number Diff line
@@ -109,6 +109,13 @@ enum msm_vfe_inputmux {
	EXTERNAL_READ,
};

/*
 * Composite group a stats stream may be assigned to by userspace.
 * GRP_NONE (0) means the stream is not composited; GRP_1/GRP_2 select
 * one of the hardware composite mask slots (stored as composite_flag,
 * so slot index = value - 1).
 */
enum msm_vfe_stats_composite_group {
	STATS_COMPOSITE_GRP_NONE,
	STATS_COMPOSITE_GRP_1,
	STATS_COMPOSITE_GRP_2,
	STATS_COMPOSITE_GRP_MAX,
};

struct msm_vfe_pix_cfg {
	struct msm_vfe_camif_cfg camif_cfg;
	enum msm_vfe_inputmux input_mux;