Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a0593d84 authored by DL's avatar DL
Browse files

* Update exynos MFC driver to conform to Samsung Treltexx sources.

* Update v4l2 driver to conform to Samsung Treltexx sources.
* Remove non-existent VIDEO_EXT secmem region

The imported sources contained additional code that appears to belong
to a different SoC. The code in question was spread out
across the exynos-ion driver, the secmem driver and the Exynos
MFC driver. It was causing attempts to access non-existent
contiguous memory regions, attempts to create secure sysmmu
page tables in these non-existent regions and attempts to
allocate protected memory, all of which failed.

The symptom is a failure to instantiate all Exynos hardware
video codecs. This backport of the Samsung Treltexx sources
fixes the failure to instantiate hardware video codecs.
parent 2cb5e430
Loading
Loading
Loading
Loading

arch/arm/mach-exynos/secmem.c

100644 → 100755
+3 −5
Original line number Diff line number Diff line
@@ -44,7 +44,6 @@ static uint32_t secmem_regions[] = {
	ION_EXYNOS_ID_MFC_SH,
	ION_EXYNOS_ID_G2D_WFD,
	ION_EXYNOS_ID_VIDEO,
	ION_EXYNOS_ID_VIDEO_EXT,
	ION_EXYNOS_ID_SECTBL,
	ION_EXYNOS_ID_MFC_FW,
	ION_EXYNOS_ID_MFC_NFW,
@@ -54,10 +53,9 @@ static char *secmem_regions_name[] = {
	"mfc_sh",	/* 0 */
	"g2d_wfd",	/* 1 */
	"video",	/* 2 */
	"video_ext",	/* 3 */
	"sectbl",	/* 4 */
	"mfc_fw",	/* 5 */
	"mfc_nfw",	/* 6 */
	"sectbl",	/* 3 */
	"mfc_fw",	/* 4 */
	"mfc_nfw",	/* 5 */
	NULL
};

+4 −143
Original line number Diff line number Diff line
@@ -31,8 +31,6 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/plist.h>
#include <linux/kref.h>
#include <mach/smc.h>

#include <asm/pgtable.h>

@@ -418,117 +416,9 @@ struct ion_exynos_cmadata {
	int id;
	char name[MAX_CONTIG_NAME + 1];
	bool isolated_on_boot; /* set on boot-time. unset by isolated_store() */
	bool secure;
	struct mutex lock;
	struct kref secure_ref;
};

static int __ion_secure_protect(struct device *dev)
{
	struct cma_info info;
	struct ion_exynos_cmadata *cmadata = dev_get_drvdata(dev);
	int try = 2;
	int ret;

	if (dma_contiguous_info(dev, &info)) {
		dev_err(dev, "Failed to retrieve region information\n");
		return -EPERM;
	}
	do {
		ret = exynos_smc(SMC_DRM_SECMEM_REGION_INFO, cmadata->id,
				info.base, info.size);
	} while (ret != 0 && --try > 0);
	if (ret != 0) {
		pr_crit("%s: failed smc call for region info, ret=%d\n",
			__func__, ret);
		return -EFAULT;
	}

	try = 2;
	do {
		ret = exynos_smc(SMC_DRM_SECMEM_REGION_PROT, cmadata->id,
				SMC_PROTECTION_ENABLE, 0);
	} while (ret != 0 && --try > 0);
	if (ret != 0) {
		pr_crit("%s: failed smc call for region prot, ret=%d\n",
			__func__, ret);
		return -EFAULT;
	}

	pr_info("%s: Successed smc call for region, ret %d\n",
		__func__, ret);
	return 0;
}

int ion_secure_protect(struct device *dev)
{
	struct ion_exynos_cmadata *cmadata = dev_get_drvdata(dev);
	if (!cmadata->secure) {
		pr_err("%s: region %s is not secure region\n", __func__, cmadata->name);
		return -EPERM;
	}
	if (atomic_inc_return(&cmadata->secure_ref.refcount) > 1)
		return 0;

	if (__ion_secure_protect(dev)) {
		atomic_set(&cmadata->secure_ref.refcount, 0);
		pr_crit("%s: protection failed for region %d\n",
			__func__, cmadata->id);
		return -EFAULT;
	}
	return 0;
}

static void __ion_secure_unprotect(struct kref *kref)
{
	struct ion_exynos_cmadata *cmadata = container_of(kref,
					struct ion_exynos_cmadata, secure_ref);
	BUG_ON(exynos_smc(SMC_DRM_SECMEM_REGION_PROT,
				cmadata->id, SMC_PROTECTION_DISABLE, 0) != 0);
}

int ion_secure_unprotect(struct device *dev)
{
	struct ion_exynos_cmadata *cmadata = dev_get_drvdata(dev);
	if (!cmadata->secure) {
		pr_err("%s: region %s is not secure region\n", __func__, cmadata->name);
		return -EPERM;
	}
	kref_put(&cmadata->secure_ref, __ion_secure_unprotect);
	return 0;
}

int ion_is_region_available(struct device *dev, unsigned long flags)
{
	struct cma_info info;
	bool protected = !!(flags & ION_FLAG_PROTECTED);
	struct ion_exynos_cmadata *cmadata = dev_get_drvdata(dev);
	bool region = (atomic_read(&cmadata->secure_ref.refcount) == 0)?
		false : true;

	if (protected == region) {
		return 0;
	} else {
		if (!protected) {
			pr_err("%s : region %s is protected, normal request is invalid\n",
				__func__, cmadata->name);
			return -EPERM;
		} else {
			if (dma_contiguous_info(dev, &info) != 0) {
				dev_err(dev, "failed to retrieve region information\n");
				return -ENODEV;
			}
			if (info.free != info.size) {
				pr_err("%s : region %s is busy due to normal allocation of secure region\n",
					__func__, cmadata->name);
				pr_err("%s : region %s is now in-use, total = %zd, free = %zd\n",
					__func__, cmadata->name, info.size, info.free);
				return -EBUSY;
			}
			return 0;
		}
	}
}
static int ion_cma_device_name_match(struct device *dev, void *data)
{
	struct ion_exynos_cmadata *cmadata = dev_get_drvdata(dev);
@@ -664,7 +554,7 @@ static int ion_exynos_contig_heap_allocate(struct ion_heap *heap,

	/* fixup of old DRM flags */
	if (flags & (ION_EXYNOS_FIMD_VIDEO_MASK | ION_EXYNOS_MFC_OUTPUT_MASK |
			ION_EXYNOS_MFC_INPUT_MASK | ION_EXYNOS_VIDEO_EXT2_MASK))
			ION_EXYNOS_MFC_INPUT_MASK))
		id = ION_EXYNOS_ID_VIDEO;

	dev = device_find_child(contig_heap->dev, &id, ion_cma_device_id_match);
@@ -678,10 +568,6 @@ static int ion_exynos_contig_heap_allocate(struct ion_heap *heap,
	if (!align)
		align = PAGE_SIZE;

	ret = ion_is_region_available(dev, flags);
	if (ret != 0)
		return ret;

	buffer->priv_virt = dma_alloc_from_contiguous(dev, len >> PAGE_SHIFT,
					      get_order(align));
	if (buffer->priv_virt == NULL) {
@@ -697,13 +583,7 @@ static int ion_exynos_contig_heap_allocate(struct ion_heap *heap,
				__func__, dev_name(dev));
		return -ENOMEM;
	}
	if (buffer->flags & ION_FLAG_PROTECTED)
		ret = ion_secure_protect(dev);

	if (ret != 0) {
		dma_release_from_contiguous(
			dev, buffer->priv_virt, len >> PAGE_SHIFT);
	}
	return ret;
}

@@ -716,7 +596,7 @@ static void ion_exynos_contig_heap_free(struct ion_buffer *buffer)

	/* fixup of old DRM flags */
	if (buffer->flags & (ION_EXYNOS_FIMD_VIDEO_MASK | ION_EXYNOS_MFC_OUTPUT_MASK |
				ION_EXYNOS_MFC_INPUT_MASK | ION_EXYNOS_VIDEO_EXT2_MASK))
				ION_EXYNOS_MFC_INPUT_MASK))
		id = ION_EXYNOS_ID_VIDEO;

	dev = device_find_child(contig_heap->dev, &id, ion_cma_device_id_match);
@@ -732,8 +612,6 @@ static void ion_exynos_contig_heap_free(struct ion_buffer *buffer)
			page_to_phys((struct page *)buffer->priv_virt),
			dev_name(dev));
	}
	if (buffer->flags & ION_FLAG_PROTECTED)
		ion_secure_unprotect(dev);
}

static int ion_exynos_contig_heap_phys(struct ion_heap *heap,
@@ -1366,7 +1244,6 @@ struct exynos_ion_contig_region {
	phys_addr_t base;
	struct device dev;
	bool isolated;
	bool secure;
};

static int contig_region_cursor __initdata;
@@ -1451,22 +1328,6 @@ static int __init __fdt_init_exynos_ion(unsigned long node, const char *uname,
		}

	}
	prop = of_get_flat_dt_prop(node, "secure", &len);
	for (i = 0; prop && (unsigned long)i < (len / sizeof(long)); i++) {
		int id;
		int j;

		id = be32_to_cpu(prop[i]);

		for (j = 0; j < contig_region_cursor; j++) {
			if (exynos_ion_contig_region[j].id == id) {
				exynos_ion_contig_region[j].secure = true;
				break;
			}
		}

	}

	return 0;
}

@@ -1621,6 +1482,8 @@ int __init init_exynos_ion_contig_heap(void)
		contig_region_cursor++;
	}

	pr_info("init_exynos_ion_contig_heap");

	return 0;
}
#endif	/* CONFIG_ION_EXYNOS_OF */
@@ -1726,8 +1589,6 @@ static int __init ion_exynos_contigheap_init(void)
		drvdata->id = exynos_ion_contig_region[i].id;
		strncpy(drvdata->name, exynos_ion_contig_region[i].name,
			MAX_CONTIG_NAME);
		drvdata->secure = exynos_ion_contig_region[i].secure;
		atomic_set(&drvdata->secure_ref.refcount, 0);

		dev = device_create(ion_cma_class,
				__init_contig_heap->dev, 0, drvdata,
+0 −2
Original line number Diff line number Diff line
@@ -141,7 +141,6 @@ static inline unsigned int r2h_bits(int cmd)
#define S5P_FIMV_RET_INSTANCE_ID		0xF070

#define S5P_FIMV_ERROR_CODE			0xF074
#define S5P_FIMV_ERR_SYNC_POINT_NOT_RECEIVED	190
#define S5P_FIMV_ERR_WARNINGS_START		160
#define S5P_FIMV_ERR_WARNINGS_END		222
#define S5P_FIMV_ERR_DEC_MASK			0xFFFF
@@ -167,7 +166,6 @@ static inline unsigned int r2h_bits(int cmd)
#define S5P_FIMV_D_OPT_TILE_MODE_SHIFT		0
#define S5P_FIMV_D_OPT_DYNAMIC_DPB_SET_SHIFT	3
#define S5P_FIMV_D_OPT_NOT_CODED_SET_SHIFT	4
#define S5P_FIMV_D_OPT_SPECIAL_PARSING_SHIFT	15

#define S5P_FIMV_D_DISPLAY_DELAY		0xF0B8

+0 −2
Original line number Diff line number Diff line
@@ -148,7 +148,6 @@ static inline unsigned int r2h_bits(int cmd)
#define S5P_FIMV_RET_INSTANCE_ID		0xF070
#define S5P_FIMV_ERROR_CODE			0xF074

#define S5P_FIMV_ERR_SYNC_POINT_NOT_RECEIVED	190
#define S5P_FIMV_ERR_WARNINGS_START		160
#define S5P_FIMV_ERR_WARNINGS_END		222
#define S5P_FIMV_ERR_DEC_MASK			0xFFFF
@@ -175,7 +174,6 @@ static inline unsigned int r2h_bits(int cmd)
#define S5P_FIMV_D_OPT_TILE_MODE_SHIFT		0
#define S5P_FIMV_D_OPT_DYNAMIC_DPB_SET_SHIFT	3
#define S5P_FIMV_D_OPT_NOT_CODED_SET_SHIFT	4
#define S5P_FIMV_D_OPT_SPECIAL_PARSING_SHIFT	15

#define S5P_FIMV_D_DISPLAY_DELAY		0xF0B8

+33 −126
Original line number Diff line number Diff line
@@ -89,7 +89,7 @@ static struct proc_dir_entry *mfc_proc_entry;
#define MFC_DRM_MAGIC_CHUNK3	0x3bd05317
#endif

#define MFC_SFR_AREA_COUNT	19
#define MFC_SFR_AREA_COUNT	14
void s5p_mfc_dump_regs(struct s5p_mfc_dev *dev)
{
	int i;
@@ -97,14 +97,14 @@ void s5p_mfc_dump_regs(struct s5p_mfc_dev *dev)
		{ 0x0, 0x80 },
		{ 0x1000, 0xCD0 },
		{ 0xF000, 0xFF8 },
#if 0
		{ 0x2000, 0xF70 },
		{ 0x3000, 0x904 },
#else
		{ 0x2000, 0xA00 },
		{ 0x3000, 0x40 },
		{ 0x3110, 0x10 },
		{ 0x5000, 0x100 },
		{ 0x5200, 0x300 },
		{ 0x5600, 0x100 },
		{ 0x5800, 0x100 },
		{ 0x5A00, 0x100 },
#endif
		{ 0x5000, 0x9C4 },
		{ 0x6000, 0xC4 },
		{ 0x7000, 0x21C },
		{ 0x8000, 0x20C },
@@ -534,13 +534,11 @@ static int s5p_mfc_check_hw_state(struct s5p_mfc_dev *dev)
#elif defined(CONFIG_SOC_EXYNOS5433)
static int mfc_check_power_state(struct s5p_mfc_dev *dev)
{
	int reg_val, ref_val, state_val;
	int reg_val, ref_val;

	ref_val = s5p_mfc_get_power_ref_cnt(dev);
	reg_val = readl(EXYNOS5433_MFC_CONFIGURATION);
	state_val = readl(EXYNOS5433_MFC_STATUS);
	mfc_err("* MFC power config = 0x%x, state = 0x%x, ref cnt = %d\n",
			reg_val, state_val, ref_val);
	mfc_err("* MFC power state = 0x%x, ref cnt = %d\n", reg_val, ref_val);

	if (reg_val)
		return 1;
@@ -587,7 +585,7 @@ static int mfc_check_clock_state(struct s5p_mfc_dev *dev)

	return 0;
}
int s5p_mfc_check_hw_state(struct s5p_mfc_dev *dev)
static int s5p_mfc_check_hw_state(struct s5p_mfc_dev *dev)
{
	int ret;

@@ -863,9 +861,7 @@ static void mfc_check_ref_frame(struct s5p_mfc_ctx *ctx,
{
	struct s5p_mfc_dec *dec = ctx->dec_priv;
	struct s5p_mfc_buf *ref_buf, *tmp_buf;
	struct list_head *dst_list;	
	int index;
	int found = 0;	

	list_for_each_entry_safe(ref_buf, tmp_buf, ref_list, list) {
		index = ref_buf->vb.v4l2_buf.index;
@@ -881,27 +877,10 @@ static void mfc_check_ref_frame(struct s5p_mfc_ctx *ctx,
			clear_bit(index, &dec->dpb_status);
			mfc_debug(2, "Move buffer[%d], fd[%d] to dst queue\n",
					index, dec->assigned_fd[index]);
			found = 1;					
			break;
		}
	}

	if (is_h264(ctx) && !found) {
		dst_list = &ctx->dst_queue;
		list_for_each_entry_safe(ref_buf, tmp_buf, dst_list, list) {
			index = ref_buf->vb.v4l2_buf.index;
			if (index == ref_index && ref_buf->already) {
				dec->assigned_fd[index] =
					ref_buf->vb.v4l2_planes[0].m.fd;
				clear_bit(index, &dec->dpb_status);
				mfc_debug(2, "re-assigned buffer[%d], fd[%d] for H264\n",
						index, dec->assigned_fd[index]);
				found = 1;
			break;
		}
	}
}
}

/* Process the released reference information */
static void mfc_handle_released_info(struct s5p_mfc_ctx *ctx,
@@ -917,17 +896,11 @@ static void mfc_handle_released_info(struct s5p_mfc_ctx *ctx,
	if (released_flag) {
		for (t = 0; t < MFC_MAX_DPBS; t++) {
			if (released_flag & (1 << t)) {
				if (dec->err_sync_flag & (1 << t)) {
					mfc_debug(2, "Released, but reuse. FD[%d] = %03d\n",
							t, dec->assigned_fd[t]);
					dec->err_sync_flag &= ~(1 << t);
				} else {
					mfc_debug(2, "Release FD[%d] = %03d\n",
				mfc_debug(2, "Release FD[%d] = %03d !! ",
						t, dec->assigned_fd[t]);
				refBuf->dpb[ncount].fd[0] = dec->assigned_fd[t];
					ncount++;
				}
				dec->assigned_fd[t] = MFC_INFO_INIT_FD;
				ncount++;
				mfc_check_ref_frame(ctx, dst_queue_addr, t);
			}
		}
@@ -1066,29 +1039,12 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
		mfc_debug(2, "Listing: %d\n", dst_buf->vb.v4l2_buf.index);
		/* Check if this is the buffer we're looking for */
		mfc_debug(2, "0x%08lx, 0x%08x",
				(unsigned long)s5p_mfc_mem_plane_addr(ctx,
					&dst_buf->vb, 0), dspl_y_addr);
				(unsigned long)s5p_mfc_mem_plane_addr(
							ctx, &dst_buf->vb, 0),
				dspl_y_addr);
		if (s5p_mfc_mem_plane_addr(ctx, &dst_buf->vb, 0)
							== dspl_y_addr) {
			index = dst_buf->vb.v4l2_buf.index;
			if (ctx->codec_mode == S5P_FIMV_CODEC_VC1RCV_DEC &&
					s5p_mfc_err_dspl(err) == S5P_FIMV_ERR_SYNC_POINT_NOT_RECEIVED) {
				if (released_flag & (1 << index)) {
					list_del(&dst_buf->list);
					dec->ref_queue_cnt--;
					list_add_tail(&dst_buf->list, &ctx->dst_queue);
					ctx->dst_queue_cnt++;
					dec->dpb_status &= ~(1 << index);
					released_flag &= ~(1 << index);
					mfc_debug(2, "SYNC_POINT_NOT_RECEIVED, released.\n");
				} else {
					dec->err_sync_flag |= 1 << index;
					mfc_debug(2, "SYNC_POINT_NOT_RECEIVED, used.\n");
				}
				dec->dynamic_used |= released_flag;
				break;
			}

			list_del(&dst_buf->list);

			if (dec->is_dynamic_dpb)
@@ -1761,18 +1717,6 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
	case S5P_FIMV_R2H_CMD_COMPLETE_SEQ_RET:
	case S5P_FIMV_R2H_CMD_ENC_BUFFER_FULL_RET:
		if (ctx->type == MFCINST_DECODER) {
			if (ctx->state == MFCINST_SPECIAL_PARSING_NAL) {
				s5p_mfc_clear_int_flags();
				spin_lock_irq(&dev->condlock);
				clear_bit(ctx->num, &dev->ctx_work_bits);
				spin_unlock_irq(&dev->condlock);
				ctx->state =  MFCINST_RUNNING;
				if (clear_hw_bit(ctx) == 0)
					BUG();
				s5p_mfc_clock_off(dev);
				wake_up_ctx(ctx, reason, err);
				goto done;
			}
			s5p_mfc_handle_frame(ctx, reason, err);
		} else if (ctx->type == MFCINST_ENCODER) {
			if (reason == S5P_FIMV_R2H_CMD_SLICE_DONE_RET) {
@@ -2032,21 +1976,12 @@ int s5p_mfc_request_sec_pgtable(struct s5p_mfc_dev *dev)
		mfc_err("smc call for video page table failed. ret = %d\n", ret);
		return -1;
	}

	ion_exynos_contig_heap_info(ION_EXYNOS_ID_MFC_SH, &base, &size);
	ret = exynos_smc(SMC_DRM_MAKE_PGTABLE, SMC_FC_ID_MFC_SH(dev->id), base, size);
	if (ret) {
		mfc_err("smc call for mfc sh page table failed. ret = %d\n", ret);
		return -1;
	}

	ion_exynos_contig_heap_info(ION_EXYNOS_ID_VIDEO_EXT, &base, &size);
	ret = exynos_smc(SMC_DRM_MAKE_PGTABLE, SMC_FC_ID_VIDEO_EXT(dev->id), base, size);
	if (ret) {
		mfc_err("smc call for video ext page table failed. ret = %d\n", ret);
		return -1;
	}

	return 0;
}

@@ -2063,8 +1998,6 @@ int s5p_mfc_release_sec_pgtable(struct s5p_mfc_dev *dev)
}
#endif

static struct mutex mfc_open_mutex;

/* Open an MFC node */
static int s5p_mfc_open(struct file *file)
{
@@ -2087,8 +2020,6 @@ static int s5p_mfc_open(struct file *file)
	if (mutex_lock_interruptible(&dev->mfc_mutex))
		return -ERESTARTSYS;

	mutex_lock(&mfc_open_mutex);

	node = s5p_mfc_get_node_type(file);
	if (node == MFCNODE_INVALID) {
		mfc_err("cannot specify node type\n");
@@ -2302,7 +2233,6 @@ static int s5p_mfc_open(struct file *file)
	mfc_info_ctx("MFC open completed [%d:%d] dev = %p, ctx = %p\n",
			dev->num_drm_inst, dev->num_inst, dev, ctx);
	mutex_unlock(&dev->mfc_mutex);
	mutex_unlock(&mfc_open_mutex);
	return ret;

	/* Deinit when failure occured */
@@ -2368,7 +2298,6 @@ err_node_type:
	mfc_info_dev("MFC driver open is failed [%d:%d]\n",
			dev->num_drm_inst, dev->num_inst);
	mutex_unlock(&dev->mfc_mutex);
	mutex_unlock(&mfc_open_mutex);

err_no_device:

@@ -2394,30 +2323,26 @@ static int s5p_mfc_release(struct file *file)
	mfc_info_ctx("MFC driver release is called [%d:%d], is_drm(%d)\n",
			dev->num_drm_inst, dev->num_inst, ctx->is_drm);

	spin_lock_irq(&dev->condlock);
	set_bit(ctx->num, &dev->ctx_stop_bits);
	clear_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock_irq(&dev->condlock);
	if (need_to_wait_frame_start(ctx)) {
		ctx->state = MFCINST_ABORT;
		if (s5p_mfc_wait_for_done_ctx(ctx,
				S5P_FIMV_R2H_CMD_FRAME_DONE_RET))
			s5p_mfc_cleanup_timeout(ctx);
	}

	/* If a H/W operation is in progress, wait for it complete */
	if (need_to_wait_nal_abort(ctx)) {
		ctx->state = MFCINST_ABORT;
		if (s5p_mfc_wait_for_done_ctx(ctx,
				S5P_FIMV_R2H_CMD_NAL_ABORT_RET))
			s5p_mfc_cleanup_timeout(ctx);
	} else if (test_bit(ctx->num, &dev->hw_lock)) {
		ret = wait_event_timeout(ctx->queue,
				(test_bit(ctx->num, &dev->hw_lock) == 0),
				msecs_to_jiffies(MFC_INT_TIMEOUT));
		if (ret == 0)
			mfc_err_ctx("wait for event failed\n");			
	}

	if (ctx->type == MFCINST_ENCODER) {
		enc = ctx->enc_priv;
		if (!enc) {
			mfc_err_ctx("no mfc encoder to run\n");
			ret = -EINVAL;
			goto err_release;
			mutex_unlock(&dev->mfc_mutex);
			return -EINVAL;
		}

		if (enc->in_slice || enc->buf_full) {
@@ -2474,14 +2399,9 @@ static int s5p_mfc_release(struct file *file)
	if (!atomic_read(&dev->watchdog_run) &&
		(ctx->inst_no != MFC_NO_INSTANCE_SET)) {
		/* Wait for hw_lock == 0 for this context */
		ret = wait_event_timeout(ctx->queue,
		wait_event_timeout(ctx->queue,
				(test_bit(ctx->num, &dev->hw_lock) == 0),
				msecs_to_jiffies(MFC_INT_SHORT_TIMEOUT));
		if (ret == 0) {
			mfc_err_ctx("Waiting for hardware to finish timed out\n");
			ret = -EBUSY;
			goto err_release;
		}				

		ctx->state = MFCINST_RETURN_INST;
		spin_lock_irq(&dev->condlock);
@@ -2489,6 +2409,7 @@ static int s5p_mfc_release(struct file *file)
		spin_unlock_irq(&dev->condlock);

		/* To issue the command 'CLOSE_INSTANCE' */
		s5p_mfc_clean_ctx_int_flags(ctx);
		s5p_mfc_try_run(dev);

		/* Wait until instance is returned or timeout occured */
@@ -2546,8 +2467,10 @@ static int s5p_mfc_release(struct file *file)
					s5p_mfc_clock_off(dev);
				}

				ret = -EIO;
				goto err_release;

				mutex_unlock(&dev->mfc_mutex);

				return -EIO;
			}
		}

@@ -2612,11 +2535,6 @@ static int s5p_mfc_release(struct file *file)
		enc_cleanup_user_shared_handle(ctx);
		kfree(ctx->enc_priv);
	}

	spin_lock_irq(&dev->condlock);
	clear_bit(ctx->num, &dev->ctx_stop_bits);
	spin_unlock_irq(&dev->condlock);
	
	dev->ctx[ctx->num] = 0;
	kfree(ctx);

@@ -2626,15 +2544,6 @@ static int s5p_mfc_release(struct file *file)
	mutex_unlock(&dev->mfc_mutex);

	return 0;

err_release:
	spin_lock_irq(&dev->condlock);
	clear_bit(ctx->num, &dev->ctx_stop_bits);
	spin_unlock_irq(&dev->condlock);

	mutex_unlock(&dev->mfc_mutex);

	return ret;	
}

/* Poll */
@@ -2850,7 +2759,6 @@ static int s5p_mfc_probe(struct platform_device *pdev)
	spin_lock_init(&dev->irqlock);
	spin_lock_init(&dev->condlock);
	mutex_init(&dev->curr_rate_lock);
	mutex_init(&mfc_open_mutex);

	dev->device = &pdev->dev;
	dev->pdata = pdev->dev.platform_data;
@@ -3474,7 +3382,6 @@ static struct platform_driver s5p_mfc_driver = {
		.owner	= THIS_MODULE,
		.pm	= &s5p_mfc_pm_ops,
		.of_match_table = exynos_mfc_match,
		.suppress_bind_attrs = true,		
	},
};

Loading