
Commit f677e960 authored by Sushmita Susheelendra

drm/msm: Implement shared virtual memory ioctl

Shared virtual memory allows the CPU and GPU to use
the same virtual address for a buffer. This change
implements an ioctl to allow creation of SVM buffer
objects.

Change-Id: I0d929a2e37a9eeef948dc2a37250c1eb9adf6fc7
Signed-off-by: Sushmita Susheelendra <ssusheel@codeaurora.org>
parent 48d9d351
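
For context, here is a minimal userspace sketch of how the new ioctl might be driven. It is not part of this commit: the DRM_IOCTL_MSM_GEM_SVM_NEW request macro is assumed from the DRM_IOCTL_DEF_DRV(MSM_GEM_SVM_NEW, ...) entry added below, and drmIoctl() comes from libdrm.

	#include <stdint.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <xf86drm.h>
	#include "msm_drm.h"	/* struct drm_msm_gem_svm_new, MSM_BO_CACHED */

	/* Hypothetical helper: wrap an existing page-aligned allocation in an
	 * SVM bo. Both hostptr and size must be page-aligned, and only
	 * MSM_BO_CACHED objects are accepted (see msm_gem_svm_new() below).
	 */
	static int create_svm_bo(int fd, void *buf, size_t size, uint32_t *handle)
	{
		struct drm_msm_gem_svm_new req = {
			.hostptr = (uint64_t)(uintptr_t)buf,
			.size    = size,
			.flags   = MSM_BO_CACHED,
		};
		int ret = drmIoctl(fd, DRM_IOCTL_MSM_GEM_SVM_NEW, &req);

		if (ret)
			return ret;

		/* On success the GPU addresses the buffer at the same
		 * virtual address the CPU uses, i.e. at hostptr. */
		*handle = req.handle;
		return 0;
	}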
+28 −2
@@ -559,7 +559,8 @@ static struct msm_file_private *setup_pagetable(struct msm_drm_private *priv)
		return ERR_PTR(-ENOMEM);

	ctx->aspace = msm_gem_address_space_create_instance(
-		priv->gpu->aspace->mmu, "gpu", 0x100000000, 0x1ffffffff);
+		priv->gpu->aspace->mmu, "gpu", 0x100000000ULL,
+		TASK_SIZE_64 - 1);

	if (IS_ERR(ctx->aspace)) {
		int ret = PTR_ERR(ctx->aspace);
@@ -1141,6 +1142,20 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
			args->flags, &args->handle);
}

+static int msm_ioctl_gem_svm_new(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_msm_gem_svm_new *args = data;
+
+	if (args->flags & ~MSM_BO_FLAGS) {
+		DRM_ERROR("invalid flags: %08x\n", args->flags);
+		return -EINVAL;
+	}
+
+	return msm_gem_svm_new_handle(dev, file, args->hostptr, args->size,
+			args->flags, &args->handle);
+}
+
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
@@ -1193,6 +1208,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
+	struct msm_gem_object *msm_obj;
	struct msm_file_private *ctx = file->driver_priv;
	int ret = 0;

@@ -1203,10 +1219,10 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
	if (!obj)
		return -ENOENT;

+	msm_obj = to_msm_bo(obj);
	if (args->flags & MSM_INFO_IOVA) {
		struct msm_gem_address_space *aspace = NULL;
		struct msm_drm_private *priv = dev->dev_private;
-		struct msm_gem_object *msm_obj = to_msm_bo(obj);
		uint64_t iova;

		if (msm_obj->flags & MSM_BO_SECURE && priv->gpu)
@@ -1223,6 +1239,14 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		if (!ret)
			args->offset = iova;
	} else {
+		if (msm_obj->flags & MSM_BO_SVM) {
+			/*
+			 * Offset for an SVM object is not needed as they are
+			 * already mmap'ed before the SVM ioctl is invoked.
+			 */
+			ret = -EACCES;
+			goto out;
+		}
		args->offset = msm_gem_mmap_offset(obj);
	}

@@ -1700,6 +1724,8 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
			  DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SYNC, msm_ioctl_gem_sync,
			  DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_SVM_NEW, msm_ioctl_gem_svm_new,
+			  DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
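
A note on the setup_pagetable() change above: raising the per-process GPU address space ceiling from 0x1ffffffff to TASK_SIZE_64 - 1 is what makes a 1:1 CPU/GPU mapping possible, since any address a 64-bit process can own now fits inside the GPU aspace. A hypothetical range check (not in this patch) would look like:

	/* Sketch only: a hostptr can be mirrored one-to-one only if it lies
	 * inside the aspace created above, [0x100000000, TASK_SIZE_64 - 1]. */
	static bool svm_hostptr_in_range(uint64_t hostptr, uint64_t size)
	{
		return hostptr >= 0x100000000ULL && hostptr < TASK_SIZE_64 &&
		       size <= TASK_SIZE_64 - hostptr;	/* overflow-safe */
	}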
+11 −0
@@ -408,6 +408,11 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		void *priv, unsigned int flags);
+int msm_gem_reserve_iova(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *domain,
+		uint64_t hostptr, uint64_t size);
+void msm_gem_release_iova(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma);

void msm_gem_address_space_put(struct msm_gem_address_space *aspace);

@@ -471,6 +476,12 @@ struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		uint32_t size, struct sg_table *sgt, u32 flags);
void msm_gem_sync(struct drm_gem_object *obj, u32 op);
+int msm_gem_svm_new_handle(struct drm_device *dev, struct drm_file *file,
+		uint64_t hostptr, uint64_t size,
+		uint32_t flags, uint32_t *handle);
+struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev,
+		struct drm_file *file, uint64_t hostptr,
+		uint64_t size, uint32_t flags);

int msm_framebuffer_prepare(struct drm_framebuffer *fb,
		struct msm_gem_address_space *aspace);
+200 −26
@@ -172,10 +172,19 @@ static void put_pages(struct drm_gem_object *obj)
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

-		if (use_pages(obj))
-			drm_gem_put_pages(obj, msm_obj->pages, true, false);
-		else
+		if (use_pages(obj)) {
+			if (msm_obj->flags & MSM_BO_SVM) {
+				int npages = obj->size >> PAGE_SHIFT;
+
+				release_pages(msm_obj->pages, npages, 0);
+				kfree(msm_obj->pages);
+			} else {
+				drm_gem_put_pages(obj, msm_obj->pages,
+						true, false);
+			}
+		} else {
			put_pages_vram(obj);
+		}

		msm_obj->pages = NULL;
	}
@@ -202,8 +211,8 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

-	/* We can't mmap secure objects */
-	if (msm_obj->flags & MSM_BO_SECURE) {
+	/* We can't mmap secure objects or SVM objects */
+	if (msm_obj->flags & (MSM_BO_SECURE | MSM_BO_SVM)) {
		drm_gem_vm_close(vma);
		return -EACCES;
	}
@@ -655,11 +664,16 @@ void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_svm_object *msm_svm_obj = NULL;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

+	if (msm_obj->flags & MSM_BO_SVM)
+		msm_svm_obj = to_msm_svm_obj(msm_obj);

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);
@@ -688,6 +702,9 @@ void msm_gem_free_object(struct drm_gem_object *obj)
	drm_gem_object_release(obj);
	mutex_unlock(&msm_obj->lock);

+	if (msm_obj->flags & MSM_BO_SVM)
+		kfree(msm_svm_obj);
+	else
		kfree(msm_obj);
}

@@ -711,26 +728,32 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	return ret;
}

-static inline void msm_gem_add_to_inactive_list(struct msm_gem_object *msm_obj,
-		struct drm_device *dev, bool struct_mutex_locked)
+/* convenience method to construct an SVM buffer object, and userspace handle */
+int msm_gem_svm_new_handle(struct drm_device *dev, struct drm_file *file,
+		uint64_t hostptr, uint64_t size,
+		uint32_t flags, uint32_t *handle)
{
-	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	int ret;

-	if (struct_mutex_locked) {
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-	} else {
-		mutex_lock(&dev->struct_mutex);
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-		mutex_unlock(&dev->struct_mutex);
-	}
+	obj = msm_gem_svm_new(dev, file, hostptr, size, flags);
+
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	ret = drm_gem_handle_create(file, obj, handle);
+
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
}

-static int msm_gem_new_impl(struct drm_device *dev,
-		uint32_t size, uint32_t flags, struct drm_gem_object **obj,
-		bool struct_mutex_locked)
+static int msm_gem_obj_init(struct drm_device *dev,
+		uint32_t size, uint32_t flags,
+		struct msm_gem_object *msm_obj, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_gem_object *msm_obj;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
@@ -752,10 +775,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

-	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
-	if (!msm_obj)
-		return -ENOMEM;
-
	mutex_init(&msm_obj->lock);

	if (use_vram) {
@@ -773,10 +792,33 @@ static int msm_gem_new_impl(struct drm_device *dev,
	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->domains);

-	msm_gem_add_to_inactive_list(msm_obj, dev, struct_mutex_locked);
+	if (struct_mutex_locked) {
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	} else {
+		mutex_lock(&dev->struct_mutex);
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+		mutex_unlock(&dev->struct_mutex);
+	}

-	*obj = &msm_obj->base;
	return 0;
}

+static int msm_gem_new_impl(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct drm_gem_object **obj,
+		bool struct_mutex_locked)
+{
+	struct msm_gem_object *msm_obj;
+	int ret;
+
+	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
+	if (!msm_obj)
+		return -ENOMEM;
+
+	ret = msm_gem_obj_init(dev, size, flags, msm_obj, struct_mutex_locked);
+	if (ret)
+		return ret;
+
+	*obj = &msm_obj->base;
+	return 0;
+}

@@ -828,6 +870,138 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
	return _msm_gem_new(dev, size, flags, false);
}

+static struct drm_gem_object *msm_svm_gem_new_impl(struct drm_device *dev,
+		uint32_t size, uint32_t flags)
+{
+	struct msm_gem_svm_object *msm_svm_obj;
+	struct msm_gem_object *msm_obj;
+	int ret;
+
+	msm_svm_obj = kzalloc(sizeof(*msm_svm_obj), GFP_KERNEL);
+	if (!msm_svm_obj)
+		return ERR_PTR(-ENOMEM);
+
+	msm_obj = &msm_svm_obj->msm_obj_base;
+
+	ret = msm_gem_obj_init(dev, size, flags | MSM_BO_SVM, msm_obj, false);
+	if (ret) {
+		kfree(msm_svm_obj);
+		return ERR_PTR(ret);
+	}
+
+	return &msm_obj->base;
+}
+
+/* convenience method to construct an SVM GEM bo, and userspace handle */
+struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev,
+		struct drm_file *file, uint64_t hostptr,
+		uint64_t size, uint32_t flags)
+{
+	struct drm_gem_object *obj;
+	struct msm_file_private *ctx = file->driver_priv;
+	struct msm_gem_address_space *aspace = ctx->aspace;
+	struct msm_gem_object *msm_obj;
+	struct msm_gem_svm_object *msm_svm_obj;
+	struct msm_gem_vma *domain = NULL;
+	struct page **p;
+	int npages;
+	int num_pinned = 0;
+	int write;
+	int ret;
+
+	/* if we don't have IOMMU, don't bother pretending we can import: */
+	if (!iommu_present(&platform_bus_type)) {
+		dev_err_once(dev->dev, "cannot import without IOMMU\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* hostptr and size must be page-aligned */
+	if (offset_in_page(hostptr | size))
+		return ERR_PTR(-EINVAL);
+
+	/* Only CPU cached SVM objects are allowed */
+	if ((flags & MSM_BO_CACHE_MASK) != MSM_BO_CACHED)
+		return ERR_PTR(-EINVAL);
+
+	/* Allocate and initialize a new msm_gem_object */
+	obj = msm_svm_gem_new_impl(dev, size, flags);
+	if (IS_ERR(obj))
+		return obj;
+
+	drm_gem_private_object_init(dev, obj, size);
+
+	msm_obj = to_msm_bo(obj);
+	domain = obj_add_domain(&msm_obj->base, aspace);
+	if (IS_ERR(domain)) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_CAST(domain);
+	}
+
+	/* Reserve iova if not already in use, else fail */
+	ret = msm_gem_reserve_iova(aspace, domain, hostptr, size);
+	if (ret) {
+		obj_remove_domain(domain);
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(ret);
+	}
+
+	msm_svm_obj = to_msm_svm_obj(msm_obj);
+	msm_svm_obj->hostptr = hostptr;
+
+	/*
+	 * Get physical pages and map into smmu in the ioctl itself.
+	 * The driver handles iova allocation, physical page allocation and
+	 * SMMU map all in one go. If we break this, then we have to maintain
+	 * state to tell if physical pages allocation/map needs to happen.
+	 * For SVM, iova reservation needs to happen in the ioctl itself,
+	 * so do the rest right here as well.
+	 */
+	npages = size >> PAGE_SHIFT;
+	p = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
+	if (!p) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	write = (msm_obj->flags & MSM_BO_GPU_READONLY) ? 0 : 1;
+	/* This may hold mm->mmap_sem */
+	num_pinned = get_user_pages_fast(hostptr, npages, write, p);
+	if (num_pinned != npages) {
+		ret = -EINVAL;
+		goto free_pages;
+	}
+
+	msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+	if (IS_ERR(msm_obj->sgt)) {
+		ret = PTR_ERR(msm_obj->sgt);
+		goto free_pages;
+	}
+
+	msm_obj->pages = p;
+
+	ret = aspace->mmu->funcs->map(aspace->mmu, domain->iova,
+			msm_obj->sgt, msm_obj->flags, get_dmabuf_ptr(obj));
+	if (ret)
+		goto free_pages;
+
+	kref_get(&aspace->kref);
+
+	return obj;
+
+free_pages:
+	release_pages(p, num_pinned, 0);
+	kfree(p);
+
+fail:
+	if (domain)
+		msm_gem_release_iova(aspace, domain);
+
+	obj_remove_domain(domain);
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ERR_PTR(ret);
+}
+
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		uint32_t size, struct sg_table *sgt, u32 flags)
{
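
One detail worth calling out in msm_gem_svm_new() above: offset_in_page(hostptr | size) validates both values with a single test, because OR-ing them preserves any sub-page bit set in either one. A quick worked example with hypothetical values:

	/* PAGE_SIZE == 4096, so offset_in_page(x) == (x & 0xfff) */
	uint64_t hostptr = 0x7f0000001000;	/* page-aligned: low 12 bits clear */
	uint64_t size    = 0x2010;		/* NOT page-aligned */

	/* (hostptr | size) & 0xfff == 0x010 != 0, so the ioctl
	 * returns -EINVAL before touching any pages */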
+11 −0
@@ -25,6 +25,7 @@
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
#define MSM_BO_LOCKED        0x20000000    /* Pages have been securely locked */
+#define MSM_BO_SVM           0x40000000    /* bo is SVM */

struct msm_gem_address_space {
	const char *name;
@@ -85,6 +86,16 @@ struct msm_gem_object {
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)

+struct msm_gem_svm_object {
+	struct msm_gem_object msm_obj_base;
+	uint64_t hostptr;
+};
+
+#define to_msm_svm_obj(x) \
+	((struct msm_gem_svm_object *) \
+	 container_of(x, struct msm_gem_svm_object, msm_obj_base))
+
+
static inline bool is_active(struct msm_gem_object *msm_obj)
{
	return msm_obj->gpu != NULL;
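
Because msm_obj_base is embedded by value as the first member, to_msm_svm_obj() can recover the enclosing SVM object from a plain struct msm_gem_object pointer with no extra storage; msm_gem_free_object() above relies on exactly this. A minimal sketch of the pattern (hypothetical helper, not in this patch):

	static uint64_t msm_gem_svm_hostptr(struct drm_gem_object *obj)
	{
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		/* The downcast is only valid for bos created through
		 * msm_gem_svm_new() */
		if (!(msm_obj->flags & MSM_BO_SVM))
			return 0;

		return to_msm_svm_obj(msm_obj)->hostptr;
	}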
+41 −9
@@ -98,6 +98,45 @@ static int allocate_iova(struct msm_gem_address_space *aspace,
	return ret;
}

+int msm_gem_reserve_iova(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma,
+		uint64_t hostptr, uint64_t size)
+{
+	struct drm_mm *mm = &aspace->mm;
+	uint64_t start = hostptr >> PAGE_SHIFT;
+	uint64_t last = (hostptr + size - 1) >> PAGE_SHIFT;
+	int ret;
+
+	spin_lock(&aspace->lock);
+
+	if (drm_mm_interval_first(mm, start, last)) {
+		/* iova already in use, fail */
+		spin_unlock(&aspace->lock);
+		return -EADDRINUSE;
+	}
+
+	vma->node.start = hostptr >> PAGE_SHIFT;
+	vma->node.size = size >> PAGE_SHIFT;
+	vma->node.color = 0;
+
+	ret = drm_mm_reserve_node(mm, &vma->node);
+	if (!ret)
+		vma->iova = hostptr;
+
+	spin_unlock(&aspace->lock);
+
+	return ret;
+}
+
+void msm_gem_release_iova(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma)
+{
+	spin_lock(&aspace->lock);
+	if (drm_mm_node_allocated(&vma->node))
+		drm_mm_remove_node(&vma->node);
+	spin_unlock(&aspace->lock);
+}
+
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		void *priv, unsigned int flags)
@@ -116,11 +155,7 @@ int msm_gem_map_vma(struct msm_gem_address_space *aspace,
		flags, priv);

	if (ret) {
-		spin_lock(&aspace->lock);
-		if (drm_mm_node_allocated(&vma->node))
-			drm_mm_remove_node(&vma->node);
-		spin_unlock(&aspace->lock);
-
+		msm_gem_release_iova(aspace, vma);
		return ret;
	}

@@ -138,10 +173,7 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,

	aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, priv);

-	spin_lock(&aspace->lock);
-	if (drm_mm_node_allocated(&vma->node))
-		drm_mm_remove_node(&vma->node);
-	spin_unlock(&aspace->lock);
+	msm_gem_release_iova(aspace, vma);

	vma->iova = 0;

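Finally, note that msm_gem_reserve_iova() above does its collision check in page units. With hypothetical values hostptr = 0x7f12340000 and size = 0x3000 (and PAGE_SHIFT == 12):

	start = 0x7f12340000 >> 12                = 0x7f12340
	last  = (0x7f12340000 + 0x3000 - 1) >> 12 = 0x7f12342

	/* drm_mm_interval_first() returns the first node overlapping pages
	 * [start, last]; if one exists, the requested iova is already in
	 * use and the caller sees -EADDRINUSE instead of a silent alias. */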