
Commit 692a576b authored by Daniel Vetter

drm/i915: don't call shmem_read_mapping unnecessarily



This speeds up pwrite and pread from ~120 µs to ~100 µs for
reading/writing 1MB on my snb (if the backing storage pages
are already pinned, of course).
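
In pseudo-code, the fast path amounts to the pattern below (a simplified
sketch distilled from the hunks in the diff, with the error handling
condensed; not the literal kernel source):

	if (obj->pages) {
		/* backing pages already pinned: index straight into the
		 * array; no new page reference is taken */
		page = obj->pages[offset >> PAGE_SHIFT];
		release_page = 0;
	} else {
		/* unpinned: look the page up in the shmem mapping, which
		 * returns with a reference held that must be dropped later */
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
		if (IS_ERR(page))
			return PTR_ERR(page);
		release_page = 1;
	}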

v2: Chris Wilson pointed out a glaring page reference bug - I've
unconditionally dropped the reference. With that fixed (and the
associated reduction of dirt in dmesg) it's now even a notch faster.

v3: Unconditionally grab a page reference when dropping
dev->struct_mutex to simplify the code-flow.
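
That rule reduces to the following shape (again a sketch distilled from
the diff below, not verbatim kernel code):

	/* take an unconditional reference before dropping the lock, so the
	 * page cannot be reaped while the slow-path copy runs unlocked */
	page_cache_get(page);
	mutex_unlock(&dev->struct_mutex);

	/* ... slow-path copy via kmap()/kunmap() runs here, unlocked ... */

	mutex_lock(&dev->struct_mutex);
	page_cache_release(page);	/* drop the reference taken above */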

Tested-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 3ae53783
drivers/gpu/drm/i915/i915_gem.c +30 −12
@@ -301,6 +301,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 	int hit_slowpath = 0;
 	int needs_clflush = 0;
+	int release_page;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -335,11 +336,17 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - shmem_page_offset;
 
-		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
-		if (IS_ERR(page)) {
-			ret = PTR_ERR(page);
-			goto out;
+		if (obj->pages) {
+			page = obj->pages[offset >> PAGE_SHIFT];
+			release_page = 0;
+		} else {
+			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+			if (IS_ERR(page)) {
+				ret = PTR_ERR(page);
+				goto out;
+			}
+			release_page = 1;
 		}
 
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
@@ -358,7 +365,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		}
 
 		hit_slowpath = 1;
-
+		page_cache_get(page);
 		mutex_unlock(&dev->struct_mutex);
 
 		vaddr = kmap(page);
@@ -377,8 +384,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		kunmap(page);
 
 		mutex_lock(&dev->struct_mutex);
+		page_cache_release(page);
 next_page:
 		mark_page_accessed(page);
-		page_cache_release(page);
+		if (release_page)
+			page_cache_release(page);
 
 		if (ret) {
@@ -660,6 +669,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	int shmem_page_offset, page_length, ret = 0;
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 	int hit_slowpath = 0;
+	int release_page;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -684,11 +694,17 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - shmem_page_offset;
 
-		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
-		if (IS_ERR(page)) {
-			ret = PTR_ERR(page);
-			goto out;
+		if (obj->pages) {
+			page = obj->pages[offset >> PAGE_SHIFT];
+			release_page = 0;
+		} else {
+			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+			if (IS_ERR(page)) {
+				ret = PTR_ERR(page);
+				goto out;
+			}
+			release_page = 1;
 		}
 
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
@@ -705,7 +721,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		}
 
 		hit_slowpath = 1;
-
+		page_cache_get(page);
 		mutex_unlock(&dev->struct_mutex);
 
 		vaddr = kmap(page);
@@ -720,9 +736,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		kunmap(page);
 
 		mutex_lock(&dev->struct_mutex);
+		page_cache_release(page);
 next_page:
 		set_page_dirty(page);
 		mark_page_accessed(page);
-		page_cache_release(page);
+		if (release_page)
+			page_cache_release(page);
 
 		if (ret) {