Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 92d45033 authored by Vijay Kumar B's avatar Vijay Kumar B Committed by Greg Kroah-Hartman
Browse files

Staging: poch: Remove circular buffer header



Remove the circular buffer header, which has been superseded by the
ioctl consume interface.

Signed-off-by: Vijay Kumar B <vijaykumar@bravegnu.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent dad17401
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
TODO:
	- Rx block size is limited to < 2048, hardware bug?
	- Group size is limited to < page size, kernel alloc/mmap API issues
	- fix/workaround cache issues in circular buffer header
	- test whether Tx is transmitting data from provided buffers
	- handle device unplug case
	- handle temperature above threshold
+8 −101
Original line number Diff line number Diff line
@@ -197,9 +197,6 @@ struct channel_info {

	/* Contains the header and circular buffer exported to userspace. */
	spinlock_t group_offsets_lock;
	struct poch_cbuf_header *header;
	struct page *header_pg;
	unsigned long header_size;

	/* Last group consumed by user space. */
	unsigned int consumed;
@@ -329,14 +326,12 @@ static ssize_t show_mmap_size(struct device *dev,
	int len;
	unsigned long mmap_size;
	unsigned long group_pages;
	unsigned long header_pages;
	unsigned long total_group_pages;

	group_pages = npages(channel->group_size);
	header_pages = npages(channel->header_size);
	total_group_pages = group_pages * channel->group_count;

	mmap_size = (header_pages + total_group_pages) * PAGE_SIZE;
	mmap_size = total_group_pages * PAGE_SIZE;
	len = sprintf(buf, "%lu\n", mmap_size);
	return len;
}
@@ -369,10 +364,8 @@ static int poch_channel_alloc_groups(struct channel_info *channel)
{
	unsigned long i;
	unsigned long group_pages;
	unsigned long header_pages;

	group_pages = npages(channel->group_size);
	header_pages = npages(channel->header_size);

	for (i = 0; i < channel->group_count; i++) {
		struct poch_group_info *group;
@@ -402,8 +395,7 @@ static int poch_channel_alloc_groups(struct channel_info *channel)
		 * this?
		 */
		group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE;
		group->user_offset =
			(header_pages + (i * group_pages)) * PAGE_SIZE;
		group->user_offset = (i * group_pages) * PAGE_SIZE;

		printk(KERN_INFO PFX "%ld: user_offset: 0x%lx\n", i,
		       group->user_offset);
@@ -525,56 +517,6 @@ static void channel_dma_init(struct channel_info *channel)

}

/*
 * Allocate the page-backed header that is exported to user space.
 *
 * Sizes the allocation for the fixed header plus one trailing
 * group_offsets[] slot per group, records it in channel->header_size,
 * and stores both the backing page and its kernel mapping in the
 * channel.
 *
 * Returns 0 on success, -ENOMEM if the pages cannot be allocated.
 */
static int poch_channel_alloc_header(struct channel_info *channel)
{
	unsigned long offsets_bytes;
	unsigned long total_bytes;

	/* One offset slot in group_offsets[] for every group. */
	offsets_bytes = sizeof(channel->header->group_offsets[0])
		* channel->group_count;
	total_bytes = sizeof(*channel->header) + offsets_bytes;

	channel->header_size = total_bytes;
	channel->header_pg = alloc_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(total_bytes));
	if (!channel->header_pg)
		return -ENOMEM;

	channel->header = page_address(channel->header_pg);

	return 0;
}

/*
 * Release the pages backing the user-space header allocated by
 * poch_channel_alloc_header().
 */
static void poch_channel_free_header(struct channel_info *channel)
{
	__free_pages(channel->header_pg, get_order(channel->header_size));
}

static void poch_channel_init_header(struct channel_info *channel)
{
	int i;
	struct poch_group_info *groups;
	s32 *group_offsets;

	channel->header->group_size_bytes = channel->group_size;
	channel->header->group_count = channel->group_count;

	spin_lock_init(&channel->group_offsets_lock);

	group_offsets = channel->header->group_offsets;
	groups = channel->groups;

	for (i = 0; i < channel->group_count; i++) {
		if (channel->dir == CHANNEL_DIR_RX)
			group_offsets[i] = -1;
		else
			group_offsets[i] = groups[i].user_offset;
	}
}

static void __poch_channel_clear_counters(struct channel_info *channel)
{
	channel->counters.pll_unlock = 0;
@@ -617,12 +559,6 @@ static int poch_channel_init(struct channel_info *channel,
		goto out_free_group_info;
	}

	ret = poch_channel_alloc_header(channel);
	if (ret) {
		dev_err(dev, "error allocating user space header\n");
		goto out_free_groups;
	}

	channel->fpga_iomem = poch_dev->fpga_iomem;
	channel->bridge_iomem = poch_dev->bridge_iomem;
	channel->iomem_lock = &poch_dev->iomem_lock;
@@ -630,14 +566,8 @@ static int poch_channel_init(struct channel_info *channel,

	__poch_channel_clear_counters(channel);

	printk(KERN_WARNING "poch_channel_init_header\n");

	poch_channel_init_header(channel);

	return 0;

 out_free_groups:
	poch_channel_free_groups(channel);
 out_free_group_info:
	kfree(channel->groups);
 out:
@@ -881,7 +811,6 @@ static int poch_release(struct inode *inode, struct file *filp)
	}

	atomic_dec(&channel->inited);
	poch_channel_free_header(channel);
	poch_channel_free_groups(channel);
	kfree(channel->groups);
	atomic_inc(&channel->free);
@@ -890,7 +819,7 @@ static int poch_release(struct inode *inode, struct file *filp)
}

/*
 * Map the header and the group buffers, to user space.
 * Map the group buffers to user space.
 */
static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
{
@@ -900,7 +829,6 @@ static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
	unsigned long size;

	unsigned long group_pages;
	unsigned long header_pages;
	unsigned long total_group_pages;

	int pg_num;
@@ -917,30 +845,16 @@ static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
	}

	group_pages = npages(channel->group_size);
	header_pages = npages(channel->header_size);
	total_group_pages = group_pages * channel->group_count;

	size = vma->vm_end - vma->vm_start;
	if (size != (header_pages + total_group_pages) * PAGE_SIZE) {
	if (size != total_group_pages * PAGE_SIZE) {
		printk(KERN_WARNING PFX "required %lu bytes\n", size);
		return -EINVAL;
	}

	start = vma->vm_start;

	/* FIXME: Cleanup required on failure? */
	pg = channel->header_pg;
	for (pg_num = 0; pg_num < header_pages; pg_num++, pg++) {
		printk(KERN_DEBUG PFX "page_count: %d\n", page_count(pg));
		printk(KERN_DEBUG PFX "%d: header: 0x%lx\n", pg_num, start);
		ret = vm_insert_page(vma, start, pg);
		if (ret) {
			printk(KERN_DEBUG "vm_insert 1 failed at %lx\n", start);
			return ret;
		}
		start += PAGE_SIZE;
	}

	for (i = 0; i < channel->group_count; i++) {
		pg = channel->groups[i].pg;
		for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) {
@@ -967,20 +881,16 @@ static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
 */
/*
 * Check whether any group has data available for user space.
 *
 * NOTE(review): this span interleaves pre- and post-patch lines of a
 * diff — both the old header->group_offsets[] scan (returning 1/0)
 * and the new consumed/transfer comparison (returning 'available')
 * are present, so the statements after the early returns are
 * unreachable here.  Reconcile against the committed source before
 * relying on this text as-is.
 */
static int poch_channel_available(struct channel_info *channel)
{
	int i;
	int available = 0;

	spin_lock_irq(&channel->group_offsets_lock);

	/* Pre-patch path: scan the exported offsets for a ready group. */
	for (i = 0; i < channel->group_count; i++) {
		if (channel->header->group_offsets[i] != -1) {
			spin_unlock_irq(&channel->group_offsets_lock);
			return 1;
		}
	}
	/* Post-patch path: data pending iff producer is ahead of consumer. */
	if (channel->consumed != channel->transfer)
		available = 1;

	spin_unlock_irq(&channel->group_offsets_lock);

	/* Pre-patch return; the post-patch return below is unreachable. */
	return 0;
	return available;
}

static unsigned int poch_poll(struct file *filp, poll_table *pt)
@@ -1138,7 +1048,6 @@ static void poch_irq_dma(struct channel_info *channel)
	long groups_done;
	unsigned long i, j;
	struct poch_group_info *groups;
	s32 *group_offsets;
	u32 curr_group_reg;

	if (!atomic_read(&channel->inited))
@@ -1158,14 +1067,12 @@ static void poch_irq_dma(struct channel_info *channel)
	if (groups_done <= 0)
		groups_done += channel->group_count;

	group_offsets = channel->header->group_offsets;
	groups = channel->groups;

	spin_lock(&channel->group_offsets_lock);

	for (i = 0; i < groups_done; i++) {
		j = (prev_transfer + i) % channel->group_count;
		group_offsets[j] = groups[j].user_offset;

		channel->transfer += 1;
		channel->transfer %= channel->group_count;
+0 −6
Original line number Diff line number Diff line
@@ -7,12 +7,6 @@
 * include/linux for final version.
 *
 */
/*
 * Header exported to user space via mmap(), describing the circular
 * buffer layout.  Uses a C99 flexible array member for the trailing
 * per-group offset table instead of the zero-length-array GNU
 * extension; sizeof(struct poch_cbuf_header) is unchanged, so the
 * user-space ABI is unaffected.
 */
struct poch_cbuf_header {
	__s32 group_size_bytes;	/* size of each data group in bytes */
	__s32 group_count;	/* number of groups in the buffer */
	__s32 group_offsets[];	/* per-group mmap offset, -1 when empty */
};

struct poch_counters {
	__u32 fifo_empty;
	__u32 fifo_overflow;