
Commit 3729fe2b authored by Dave Airlie

Revert "Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next"



This reverts commit 031e610a, reversing
changes made to 52d2d44e.

The mm changes in there were premature and not fully acked or reviewed by core mm folks,
I dropped the ball by merging them via this tree, so let's take them all back out.

Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 7e4b4dfc
MAINTAINERS  +0 −1
@@ -5191,7 +5191,6 @@ T: git git://people.freedesktop.org/~thomash/linux
S:	Supported
F:	drivers/gpu/drm/vmwgfx/
F:	include/uapi/drm/vmwgfx_drm.h
F:	mm/as_dirty_helpers.c

DRM DRIVERS
M:	David Airlie <airlied@linux.ie>
drivers/gpu/drm/ttm/ttm_bo.c  +0 −1
@@ -1739,7 +1739,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
	mutex_lock(&ttm_global_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&ttm_global_mutex);
	bdev->vm_ops = &ttm_bo_vm_ops;

	return 0;
out_no_sys:
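
The one-line removal above backs out an indirection: the reverted series gave struct ttm_bo_device a vm_ops pointer, initialised here to &ttm_bo_vm_ops, so a driver could substitute its own fault handlers; after the revert the field is gone and the mmap paths use the static ttm_bo_vm_ops table directly (see the vma->vm_ops assignments in the ttm_bo_vm.c diff below). A minimal standalone C sketch of the two wirings; every example_* name is an illustrative stand-in, not kernel API:

struct example_vm_ops {
	int (*fault)(void *vmf);
};

static int example_fault(void *vmf)
{
	(void)vmf;
	return 0;
}

/* The single static ops table the revert returns to. */
static const struct example_vm_ops example_bo_vm_ops = {
	.fault = example_fault,
};

struct example_bo_device {
	/* The override hook the revert removes. */
	const struct example_vm_ops *vm_ops;
};

struct example_vma {
	const struct example_vm_ops *vm_ops;
};

/* Before the revert: honour a per-device, driver-overridable table. */
static void example_mmap_overridable(struct example_vma *vma,
				     struct example_bo_device *bdev)
{
	vma->vm_ops = bdev->vm_ops;
}

/* After the revert: always the one static table. */
static void example_mmap_static(struct example_vma *vma)
{
	vma->vm_ops = &example_bo_vm_ops;
}

The sketch keeps only the shape of the change: the per-device hook is exactly one extra pointer hop on the mmap path.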
drivers/gpu/drm/ttm/ttm_bo_vm.c  +65 −104
@@ -42,6 +42,8 @@
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

#define TTM_BO_VM_NUM_PREFAULT 16

static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_fault *vmf)
{
@@ -104,30 +106,25 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
		+ page_offset;
}

/**
 * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
 * @bo: The buffer object
 * @vmf: The fault structure handed to the callback
 *
 * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped
 * during long waits, and after the wait the callback will be restarted. This
 * is to allow other threads using the same virtual memory space concurrent
 * access to map(), unmap() completely unrelated buffer objects. TTM buffer
 * object reservations sometimes wait for GPU and should therefore be
 * considered long waits. This function reserves the buffer object interruptibly
 * taking this into account. Starvation is avoided by the vm system not
 * allowing too many repeated restarts.
 * This function is intended to be used in customized fault() and _mkwrite()
 * handlers.
 *
 * Return:
 *    0 on success and the bo was reserved.
 *    VM_FAULT_RETRY if blocking wait.
 *    VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
 */
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf)
static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int err;
	int i;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address = vmf->address;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
@@ -154,55 +151,14 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
		return VM_FAULT_NOPAGE;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);

/**
 * ttm_bo_vm_fault_reserved - TTM fault helper
 * @vmf: The struct vm_fault given as argument to the fault callback
 * @prot: The page protection to be used for this memory area.
 * @num_prefault: Maximum number of prefault pages. The caller may want to
 * specify this based on madvice settings and the size of the GPU object
 * backed by the memory.
 *
 * This function inserts one or more page table entries pointing to the
 * memory backing the buffer object, and then returns a return code
 * instructing the caller to retry the page access.
 *
 * Return:
 *   VM_FAULT_NOPAGE on success or pending signal
 *   VM_FAULT_SIGBUS on unspecified error
 *   VM_FAULT_OOM on out-of-memory
 *   VM_FAULT_RETRY if retryable wait
 */
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vm_area_struct cvma = *vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int err;
	pgoff_t i;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address = vmf->address;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
		return VM_FAULT_SIGBUS;
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (bdev->driver->fault_reserve_notify) {
		struct dma_fence *moving = dma_fence_get(bo->moving);
@@ -213,9 +169,11 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			return VM_FAULT_NOPAGE;
			ret = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			return VM_FAULT_SIGBUS;
			ret = VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		if (bo->moving != moving) {
@@ -231,12 +189,21 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0))
	if (unlikely(ret != 0)) {
		if (ret == VM_FAULT_RETRY &&
		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/* The BO has already been unreserved. */
			return ret;
		}

		goto out_unlock;
	}

	err = ttm_mem_io_lock(man, true);
	if (unlikely(err != 0))
		return VM_FAULT_NOPAGE;
	if (unlikely(err != 0)) {
		ret = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	err = ttm_mem_io_reserve_vm(bo);
	if (unlikely(err != 0)) {
		ret = VM_FAULT_SIGBUS;
@@ -253,8 +220,18 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
		goto out_io_unlock;
	}

	cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
	if (!bo->mem.bus.is_iomem) {
	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
@@ -263,21 +240,24 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
		};

		ttm = bo->ttm;
		if (ttm_tt_populate(bo->ttm, &ctx)) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, most common usage */
		if (ttm_tt_populate(ttm, &ctx)) {
			ret = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	} else {
		/* Iomem should not be marked encrypted */
		cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < num_prefault; ++i) {
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem) {
			/* Iomem should not be marked encrypted */
			cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
@@ -315,26 +295,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
	ret = VM_FAULT_NOPAGE;
out_io_unlock:
	ttm_mem_io_unlock(man);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);

static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t prot;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	prot = vm_get_page_prot(vma->vm_flags);
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

out_unlock:
	reservation_object_unlock(bo->resv);
	return ret;
}
@@ -434,7 +395,7 @@ static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
	return ret;
}

const struct vm_operations_struct ttm_bo_vm_ops = {
static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
@@ -487,7 +448,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = bdev->vm_ops;
	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
@@ -519,7 +480,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)

	ttm_bo_get(bo);

	vma->vm_ops = bo->bdev->vm_ops;
	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = bo;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
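
The kerneldoc deleted above for ttm_bo_vm_reserve() describes a retryable-wait pattern that the restored ttm_bo_vm_fault() still applies inline (the "Work around locking order reversal" comment earlier in this hunk): a buffer-object reservation may wait on the GPU, so instead of blocking with mmap_sem held the handler trylocks and, on contention, hands the fault back to the VM for a restart. A simplified, self-contained C sketch of that pattern, assuming nothing beyond what the comment states; the example_* names and collapsed return codes are stand-ins, not the kernel's types:

enum example_fault_ret {
	EXAMPLE_FAULT_OK,	/* reserved; proceed to fault in pages */
	EXAMPLE_FAULT_RETRY,	/* mmap_sem dropped; the VM restarts the fault */
	EXAMPLE_FAULT_NOPAGE,	/* retry not allowed; restart via no-page */
};

struct example_resv {
	int held;
};

static int example_trylock(struct example_resv *resv)
{
	if (resv->held)
		return 0;	/* contended, e.g. a reservation waiting on the GPU */
	resv->held = 1;
	return 1;
}

static enum example_fault_ret
example_vm_reserve(struct example_resv *resv, int allow_retry, int retry_nowait)
{
	if (!example_trylock(resv)) {
		/*
		 * A reservation can be a long wait. Rather than block with
		 * mmap_sem held, hand control back to the VM so other threads
		 * can keep mapping and unmapping unrelated objects; the VM
		 * bounds repeated restarts, which is what avoids starvation.
		 */
		if (allow_retry && !retry_nowait)
			return EXAMPLE_FAULT_RETRY;
		return EXAMPLE_FAULT_NOPAGE;
	}
	return EXAMPLE_FAULT_OK;	/* reserved; caller faults pages, then unlocks */
}

In the restored function, the speculative prefault loop that runs after a successful reserve is again bounded by the fixed TTM_BO_VM_NUM_PREFAULT (16) rather than the caller-supplied num_prefault the reverted helper took.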
drivers/gpu/drm/vmwgfx/Kconfig  +0 −1
@@ -8,7 +8,6 @@ config DRM_VMWGFX
	select FB_CFB_IMAGEBLIT
	select DRM_TTM
	select FB
	select AS_DIRTY_HELPERS
	# Only needed for the transitional use of drm_crtc_init - can be removed
	# again once vmwgfx sets up the primary plane itself.
	select DRM_KMS_HELPER
drivers/gpu/drm/vmwgfx/Makefile  +1 −1
@@ -8,7 +8,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
	    vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
	    vmwgfx_validation.o vmwgfx_page_dirty.o \
	    vmwgfx_validation.o \
	    ttm_object.o ttm_lock.o

obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o