Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 08e41f62 authored by Kieran Bingham, committed by Mauro Carvalho Chehab
Browse files

media: vsp1: Use header display lists for all WPF outputs linked to the DU



Header mode display lists are now supported on all WPF outputs. To
support extended headers and auto-fld capabilities for interlaced mode
handling, only header mode display lists can be used.

Disable the headerless display list configuration, and remove the dead
code.

Signed-off-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
parent 177fb098
Loading
Loading
Loading
Loading
+27 −81
Original line number Original line Diff line number Diff line
@@ -95,7 +95,7 @@ struct vsp1_dl_body_pool {
 * struct vsp1_dl_list - Display list
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @header: display list header
 * @dma: DMA address for the header
 * @dma: DMA address for the header
 * @body0: first display list body
 * @body0: first display list body
 * @bodies: list of extra display list bodies
 * @bodies: list of extra display list bodies
@@ -119,15 +119,9 @@ struct vsp1_dl_list {
	bool internal;
	bool internal;
};
};


enum vsp1_dl_mode {
	VSP1_DL_MODE_HEADER,
	VSP1_DL_MODE_HEADERLESS,
};

/**
/**
 * struct vsp1_dl_manager - Display List manager
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @singleshot: execute the display list in single-shot mode
 * @singleshot: execute the display list in single-shot mode
 * @vsp1: the VSP1 device
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, and pending lists
 * @lock: protects the free, active, queued, and pending lists
@@ -139,7 +133,6 @@ enum vsp1_dl_mode {
 */
 */
struct vsp1_dl_manager {
struct vsp1_dl_manager {
	unsigned int index;
	unsigned int index;
	enum vsp1_dl_mode mode;
	bool singleshot;
	bool singleshot;
	struct vsp1_device *vsp1;
	struct vsp1_device *vsp1;


@@ -319,6 +312,7 @@ void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
{
	struct vsp1_dl_list *dl;
	struct vsp1_dl_list *dl;
	size_t header_offset;


	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
	if (!dl)
@@ -331,16 +325,14 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
	dl->body0 = vsp1_dl_body_get(dlm->pool);
	dl->body0 = vsp1_dl_body_get(dlm->pool);
	if (!dl->body0)
	if (!dl->body0)
		return NULL;
		return NULL;
	if (dlm->mode == VSP1_DL_MODE_HEADER) {

		size_t header_offset = dl->body0->max_entries
	header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);
				     * sizeof(*dl->body0->entries);


	dl->header = ((void *)dl->body0->entries) + header_offset;
	dl->header = ((void *)dl->body0->entries) + header_offset;
	dl->dma = dl->body0->dma + header_offset;
	dl->dma = dl->body0->dma + header_offset;


	memset(dl->header, 0, sizeof(*dl->header));
	memset(dl->header, 0, sizeof(*dl->header));
	dl->header->lists[0].addr = dl->body0->dma;
	dl->header->lists[0].addr = dl->body0->dma;
	}


	return dl;
	return dl;
}
}
@@ -472,16 +464,9 @@ struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
 *
 *
 * The reference must be explicitly released by a call to vsp1_dl_body_put()
 * The reference must be explicitly released by a call to vsp1_dl_body_put()
 * when the body isn't needed anymore.
 * when the body isn't needed anymore.
 *
 * Additional bodies are only usable for display lists in header mode.
 * Attempting to add a body to a header-less display list will return an error.
 */
 */
int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
{
{
	/* Multi-body lists are only available in header mode. */
	if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	refcount_inc(&dlb->refcnt);
	refcount_inc(&dlb->refcnt);


	list_add_tail(&dlb->list, &dl->bodies);
	list_add_tail(&dlb->list, &dl->bodies);
@@ -502,17 +487,10 @@ int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
 * Adding a display list to a chain passes ownership of the display list to
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head dl item is
 * the head display list item. The chain is released when the head dl item is
 * put back with __vsp1_dl_list_put().
 * put back with __vsp1_dl_list_put().
 *
 * Chained display lists are only usable in header mode. Attempts to add a
 * display list to a chain in header-less mode will return an error.
 */
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
			   struct vsp1_dl_list *dl)
			   struct vsp1_dl_list *dl)
{
{
	/* Chained lists are only available in header mode. */
	if (head->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	head->has_chain = true;
	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	list_add_tail(&dl->chain, &head->chain);
	return 0;
	return 0;
@@ -580,17 +558,10 @@ static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
		return false;
		return false;


	/*
	/*
	 * Check whether the VSP1 has taken the update. In headerless mode the
	 * Check whether the VSP1 has taken the update. The hardware indicates
	 * hardware indicates this by clearing the UPD bit in the DL_BODY_SIZE
	 * this by clearing the UPDHDR bit in the CMD register.
	 * register, and in header mode by clearing the UPDHDR bit in the CMD
	 */
	 * register.
	return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
	 */
	if (dlm->mode == VSP1_DL_MODE_HEADERLESS)
		return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE)
			  & VI6_DL_BODY_SIZE_UPD);
	else
		return !!(vsp1_read(vsp1, VI6_CMD(dlm->index))
			  & VI6_CMD_UPDHDR);
}
}


static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
@@ -598,27 +569,15 @@ static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;
	struct vsp1_device *vsp1 = dlm->vsp1;


	if (dlm->mode == VSP1_DL_MODE_HEADERLESS) {
		/*
		 * In headerless mode, program the hardware directly with the
		 * display list body address and size and set the UPD bit. The
		 * bit will be cleared by the hardware when the display list
		 * processing starts.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0->dma);
		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
			(dl->body0->num_entries * sizeof(*dl->header->lists)));
	} else {
	/*
	/*
		 * In header mode, program the display list header address. If
	 * Program the display list header address. If the hardware is idle
		 * the hardware is idle (single-shot mode or first frame in
	 * (single-shot mode or first frame in continuous mode) it will then be
		 * continuous mode) it will then be started independently. If
	 * started independently. If the hardware is operating, the
		 * the hardware is operating, the VI6_DL_HDR_REF_ADDR register
	 * VI6_DL_HDR_REF_ADDR register will be updated with the display list
		 * will be updated with the display list address.
	 * address.
	 */
	 */
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
}
}
}


static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
{
@@ -675,7 +634,6 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
	struct vsp1_dl_list *dl_next;
	struct vsp1_dl_list *dl_next;
	unsigned long flags;
	unsigned long flags;


	if (dlm->mode == VSP1_DL_MODE_HEADER) {
	/* Fill the header for the head and chained display lists. */
	/* Fill the header for the head and chained display lists. */
	vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));
	vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));


@@ -684,7 +642,6 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)


		vsp1_dl_list_fill_header(dl_next, last);
		vsp1_dl_list_fill_header(dl_next, last);
	}
	}
	}


	dl->internal = internal;
	dl->internal = internal;


@@ -712,7 +669,7 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
 * has completed at frame end. If the flag is not returned display list
 * has completed at frame end. If the flag is not returned display list
 * completion has been delayed by one frame because the display list commit
 * completion has been delayed by one frame because the display list commit
 * raced with the frame end interrupt. The function always returns with the flag
 * raced with the frame end interrupt. The function always returns with the flag
 * set in header mode as display list processing is then not continuous and
 * set in single-shot mode as display list processing is then not continuous and
 * races never occur.
 * races never occur.
 *
 *
 * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the previous display list
 * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the previous display list
@@ -784,13 +741,6 @@ void vsp1_dlm_setup(struct vsp1_device *vsp1)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;
		 | VI6_DL_CTRL_DLE;


	/*
	 * The DRM pipeline operates with display lists in Continuous Frame
	 * Mode, all other pipelines use manual start.
	 */
	if (vsp1->drm)
		ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}
}
@@ -830,8 +780,6 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
		return NULL;
		return NULL;


	dlm->index = index;
	dlm->index = index;
	dlm->mode = index == 0 && !vsp1->info->uapi
		  ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
	dlm->singleshot = vsp1->info->uapi;
	dlm->singleshot = vsp1->info->uapi;
	dlm->vsp1 = vsp1;
	dlm->vsp1 = vsp1;


@@ -840,14 +788,12 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,


	/*
	/*
	 * Initialize the display list body and allocate DMA memory for the body
	 * Initialize the display list body and allocate DMA memory for the body
	 * and the optional header. Both are allocated together to avoid memory
	 * and the header. Both are allocated together to avoid memory
	 * fragmentation, with the header located right after the body in
	 * fragmentation, with the header located right after the body in
	 * memory. An extra body is allocated on top of the prealloc to account
	 * memory. An extra body is allocated on top of the prealloc to account
	 * for the cached body used by the vsp1_pipeline object.
	 * for the cached body used by the vsp1_pipeline object.
	 */
	 */
	header_size = dlm->mode == VSP1_DL_MODE_HEADER
	header_size = ALIGN(sizeof(struct vsp1_dl_header), 8);
		    ? ALIGN(sizeof(struct vsp1_dl_header), 8)
		    : 0;


	dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
	dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
					     VSP1_DL_NUM_ENTRIES, header_size);
					     VSP1_DL_NUM_ENTRIES, header_size);