Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7d70e98e authored by Puranam V G Tejaswi's avatar Puranam V G Tejaswi
Browse files

msm: kgsl: make stats update atomic



Per-process stats can become corrupted if operations such as a free
and an allocation happen concurrently for memory belonging to the
same process. Similarly, the mapsize of a memdesc can go bad because
of concurrent access. So make the per-process stats and the memdesc
mapsize atomic.

Change-Id: I5da1cf368d523768d0b267b314394612934a620d
Signed-off-by: default avatarPuranam V G Tejaswi <pvgtejas@codeaurora.org>
parent e4f5f922
Loading
Loading
Loading
Loading
+5 −4
Original line number Diff line number Diff line
@@ -490,7 +490,8 @@ static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
		idr_remove(&entry->priv->mem_idr, entry->id);
	entry->id = 0;

	entry->priv->gpumem_mapped -= entry->memdesc.mapsize;
	atomic_long_sub(atomic_long_read(&entry->memdesc.mapsize),
			&entry->priv->gpumem_mapped);

	spin_unlock(&entry->priv->mem_lock);

@@ -4471,7 +4472,7 @@ kgsl_gpumem_vm_fault(struct vm_fault *vmf)

	ret = entry->memdesc.ops->vmfault(&entry->memdesc, vmf->vma, vmf);
	if ((ret == 0) || (ret == VM_FAULT_NOPAGE))
		entry->priv->gpumem_mapped += PAGE_SIZE;
		atomic_long_add(PAGE_SIZE, &entry->priv->gpumem_mapped);

	return ret;
}
@@ -4853,8 +4854,8 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
			vm_insert_page(vma, addr, page);
			addr += PAGE_SIZE;
		}
		m->mapsize = m->size;
		entry->priv->gpumem_mapped += m->mapsize;
		atomic_long_add(m->size, &m->mapsize);
		atomic_long_add(m->size, &entry->priv->gpumem_mapped);
	}

	vma->vm_file = file;
+1 −1
Original line number Diff line number Diff line
@@ -218,7 +218,7 @@ struct kgsl_memdesc {
	uint64_t gpuaddr;
	phys_addr_t physaddr;
	uint64_t size;
	uint64_t mapsize;
	atomic_long_t mapsize;
	unsigned int priv;
	struct sg_table *sgt;
	struct kgsl_memdesc_ops *ops;
+3 −2
Original line number Diff line number Diff line
@@ -158,12 +158,13 @@ static int print_mem_entry(void *data, void *ptr)
		kgsl_get_egl_counts(entry, &egl_surface_count,
						&egl_image_count);

	seq_printf(s, "%pK %pK %16llu %5d %9s %10s %16s %5d %16llu %6d %6d",
	seq_printf(s, "%pK %pK %16llu %5d %9s %10s %16s %5d %16ld %6d %6d",
			(uint64_t *)(uintptr_t) m->gpuaddr,
			(unsigned long *) m->useraddr,
			m->size, entry->id, flags,
			memtype_str(usermem_type),
			usage, (m->sgt ? m->sgt->nents : 0), m->mapsize,
			usage, (m->sgt ? m->sgt->nents : 0),
			atomic_long_read(&m->mapsize),
			egl_surface_count, egl_image_count);

	if (entry->metadata[0] != 0)
+7 −6
Original line number Diff line number Diff line
@@ -451,10 +451,10 @@ struct kgsl_process_private {
	struct kobject kobj;
	struct dentry *debug_root;
	struct {
		uint64_t cur;
		atomic_long_t cur;
		uint64_t max;
	} stats[KGSL_MEM_ENTRY_MAX];
	uint64_t gpumem_mapped;
	atomic_long_t gpumem_mapped;
	struct idr syncsource_idr;
	spinlock_t syncsource_lock;
	int fd_count;
@@ -549,9 +549,10 @@ struct kgsl_device *kgsl_get_device(int dev_idx);
/**
 * kgsl_process_add_stats() - Charge @size bytes to a per-process memory stat
 * @priv: Per-process private data holding the stats array
 * @type: KGSL_MEM_ENTRY_* bucket to charge
 * @size: Number of bytes to add
 *
 * The counter itself is updated with atomic_long_add_return() so
 * concurrent allocations/frees for the same process cannot race.
 * NOTE(review): the read-compare-write of stats[type].max below is
 * still not atomic, so the recorded high-water mark may be slightly
 * stale under contention — confirm this best-effort behavior is
 * acceptable.
 */
static inline void kgsl_process_add_stats(struct kgsl_process_private *priv,
	unsigned int type, uint64_t size)
{
	u64 ret = atomic_long_add_return(size, &priv->stats[type].cur);

	/* Track the high-water mark (best effort, see note above) */
	if (ret > priv->stats[type].max)
		priv->stats[type].max = ret;
	add_mm_counter(current->mm, MM_UNRECLAIMABLE, (size >> PAGE_SHIFT));
}

@@ -562,7 +563,7 @@ static inline void kgsl_process_sub_stats(struct kgsl_process_private *priv,
	struct task_struct *task;
	struct mm_struct *mm;

	priv->stats[type].cur -= size;
	atomic_long_sub(size, &priv->stats[type].cur);
	pid_struct = find_get_pid(priv->pid);
	if (pid_struct) {
		task = get_pid_task(pid_struct, PIDTYPE_PID);
+11 −7
Original line number Diff line number Diff line
@@ -166,18 +166,21 @@ static ssize_t
gpumem_mapped_show(struct kgsl_process_private *priv,
				int type, char *buf)
{
	/*
	 * gpumem_mapped is an atomic_long_t; atomic_long_read() returns
	 * long, hence the %ld format.
	 */
	return scnprintf(buf, PAGE_SIZE, "%ld\n",
			atomic_long_read(&priv->gpumem_mapped));
}

static ssize_t
gpumem_unmapped_show(struct kgsl_process_private *priv, int type, char *buf)
{
	if (priv->gpumem_mapped > priv->stats[type].cur)
	u64 gpumem_total = atomic_long_read(&priv->stats[type].cur);
	u64 gpumem_mapped = atomic_long_read(&priv->gpumem_mapped);

	if (gpumem_mapped > gpumem_total)
		return -EIO;

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
			priv->stats[type].cur - priv->gpumem_mapped);
			gpumem_total - gpumem_mapped);
}

static struct kgsl_mem_entry_attribute debug_memstats[] = {
@@ -194,7 +197,8 @@ static struct kgsl_mem_entry_attribute debug_memstats[] = {
/*
 * Show the current byte count for one KGSL_MEM_ENTRY_* stat bucket.
 * stats[type].cur is an atomic_long_t, so read it atomically and print
 * it with %ld (atomic_long_read() returns long).
 */
static ssize_t
mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%ld\n",
			atomic_long_read(&priv->stats[type].cur));
}

/**
@@ -465,7 +469,7 @@ static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
		get_page(page);
		vmf->page = page;

		memdesc->mapsize += PAGE_SIZE;
		atomic_long_add(PAGE_SIZE, &memdesc->mapsize);

		return 0;
	}
@@ -647,7 +651,7 @@ static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
	else if (ret == -EFAULT)
		return VM_FAULT_SIGBUS;

	memdesc->mapsize += PAGE_SIZE;
	atomic_long_add(PAGE_SIZE, &memdesc->mapsize);

	return VM_FAULT_NOPAGE;
}