drivers/gpu/drm/i915/i915_drv.h  +4 −4

@@ -1086,7 +1086,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_flush_ring(struct drm_device *dev,
-                         struct intel_ring_buffer *ring,
-                         uint32_t invalidate_domains,
-                         uint32_t flush_domains);
+int __must_check i915_gem_flush_ring(struct drm_device *dev,
+                                     struct intel_ring_buffer *ring,
+                                     uint32_t invalidate_domains,
+                                     uint32_t flush_domains);
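A note on the annotation: in the kernel, __must_check expands to __attribute__((warn_unused_result)), so after this change the compiler warns about any caller that silently drops the return value of i915_gem_flush_ring. A minimal standalone sketch of that mechanism (plain C with illustrative names, not driver code):

/* Standalone illustration of __must_check; not kernel code. */
#include <stdio.h>

#define __must_check __attribute__((warn_unused_result))

static int __must_check flush_ring(void)
{
        return -1;              /* pretend the flush failed */
}

int main(void)
{
        int ret;

        flush_ring();           /* gcc/clang: warning, ignored return value */

        ret = flush_ring();     /* checked use compiles cleanly */
        if (ret)
                fprintf(stderr, "flush failed: %d\n", ret);
        return 0;
}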
drivers/gpu/drm/i915/i915_gem.c  +68 −34

@@ -35,16 +35,16 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
-static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
-                                             bool write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
-                                                     uint64_t offset,
-                                                     uint64_t size);
+static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+                                                          bool write);
+static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
+                                                                  uint64_t offset,
+                                                                  uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
-                                       unsigned alignment,
-                                       bool map_and_fenceable);
+static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+                                                    unsigned alignment,
+                                                    bool map_and_fenceable);
 static void i915_gem_clear_fence_reg(struct drm_device *dev,

@@ -2142,25 +2142,37 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        return ret;
 }
 
-void
+int
 i915_gem_flush_ring(struct drm_device *dev,
                     struct intel_ring_buffer *ring,
                     uint32_t invalidate_domains,
                     uint32_t flush_domains)
 {
-       if (ring->flush(ring, invalidate_domains, flush_domains) == 0)
-               i915_gem_process_flushing_list(dev, flush_domains, ring);
+       int ret;
+
+       ret = ring->flush(ring, invalidate_domains, flush_domains);
+       if (ret)
+               return ret;
+
+       i915_gem_process_flushing_list(dev, flush_domains, ring);
+       return 0;
 }
 
 static int i915_ring_idle(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
 {
+       int ret;
+
        if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
                return 0;
 
-       if (!list_empty(&ring->gpu_write_list))
-               i915_gem_flush_ring(dev, ring,
-                                   I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       if (!list_empty(&ring->gpu_write_list)) {
+               ret = i915_gem_flush_ring(dev, ring,
+                                         I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+               if (ret)
+                       return ret;
+       }
 
        return i915_wait_request(dev,
                                 i915_gem_next_request_seqno(dev, ring),
                                 ring);

@@ -2370,10 +2382,13 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
        int ret;
 
        if (obj->fenced_gpu_access) {
-               if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-                       i915_gem_flush_ring(obj->base.dev,
-                                           obj->last_fenced_ring,
-                                           0, obj->base.write_domain);
+               if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+                       ret = i915_gem_flush_ring(obj->base.dev,
+                                                 obj->last_fenced_ring,
+                                                 0, obj->base.write_domain);
+                       if (ret)
+                               return ret;
+               }
 
                obj->fenced_gpu_access = false;
        }

@@ -2529,9 +2544,12 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                        return ret;
        } else if (obj->tiling_changed) {
                if (obj->fenced_gpu_access) {
-                       if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-                               i915_gem_flush_ring(obj->base.dev, obj->ring,
-                                                   0, obj->base.write_domain);
+                       if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+                               ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+                                                         0, obj->base.write_domain);
+                               if (ret)
+                                       return ret;
+                       }
 
                        obj->fenced_gpu_access = false;
                }

@@ -2817,17 +2835,16 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
-static void
+static int
 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
        struct drm_device *dev = obj->base.dev;
 
        if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
-               return;
+               return 0;
 
        /* Queue the GPU write cache flushing we need. */
-       i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
-       BUG_ON(obj->base.write_domain);
+       return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */

@@ -2894,7 +2911,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        if (obj->gtt_space == NULL)
                return -EINVAL;
 
-       i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       if (ret)
+               return ret;
+
        if (obj->pending_gpu_write || write) {
                ret = i915_gem_object_wait_rendering(obj, true);
                if (ret)

@@ -2939,7 +2959,10 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
        if (obj->gtt_space == NULL)
                return -EINVAL;
 
-       i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       if (ret)
+               return ret;
+
        /* Currently, we are always called from an non-interruptible context. */
        if (pipelined != obj->ring) {

@@ -2964,12 +2987,17 @@
 int
 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
                           bool interruptible)
 {
+       int ret;
+
        if (!obj->active)
                return 0;
 
-       if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-               i915_gem_flush_ring(obj->base.dev, obj->ring,
-                                   0, obj->base.write_domain);
+       if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+               ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+                                         0, obj->base.write_domain);
+               if (ret)
+                       return ret;
+       }
 
        return i915_gem_object_wait_rendering(obj, interruptible);
 }

@@ -2986,7 +3014,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
-       i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       if (ret)
+               return ret;
+
        ret = i915_gem_object_wait_rendering(obj, true);
        if (ret)
                return ret;

@@ -3081,7 +3112,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
        if (offset == 0 && size == obj->base.size)
                return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-       i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       if (ret)
+               return ret;
+
        ret = i915_gem_object_wait_rendering(obj, true);
        if (ret)
                return ret;

@@ -3374,7 +3408,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                 * flush earlier is beneficial.
                 */
                if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-                       i915_gem_flush_ring(dev, obj->ring,
-                                           0, obj->base.write_domain);
+                       ret = i915_gem_flush_ring(dev, obj->ring,
+                                                 0, obj->base.write_domain);
                } else if (obj->ring->outstanding_lazy_request ==
                           obj->last_rendering_seqno) {
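The i915_gem.c changes all follow one pattern: helpers that used to return void now return an errno-style int, the flushing-list bookkeeping runs only after the hardware flush succeeds, and every caller bails out early instead of pressing on. A self-contained sketch of that conversion, with hypothetical names standing in for ring->flush() and the bookkeeping:

/*
 * Sketch of the void -> int conversion this diff performs. All names
 * are illustrative stand-ins, not the driver's real API: hw_flush()
 * plays the role of ring->flush(), and the printf stands in for the
 * flushing-list update that must only happen on success.
 */
#include <errno.h>
#include <stdio.h>

static int hw_flush_ok = 1;

static int hw_flush(void)
{
        return hw_flush_ok ? 0 : -EIO;
}

/* Before the patch, a void version of this silently dropped failures. */
static int flush_writes(void)
{
        int ret;

        ret = hw_flush();
        if (ret)
                return ret;             /* propagate; skip the bookkeeping */

        printf("  moved writes to the flushing list\n");
        return 0;
}

static int ring_idle(void)
{
        int ret;

        ret = flush_writes();
        if (ret)
                return ret;             /* don't wait on a flush that never happened */

        printf("  waiting for the ring to drain\n");
        return 0;
}

int main(void)
{
        printf("flush succeeds -> %d\n", ring_idle());
        hw_flush_ok = 0;
        printf("flush fails    -> %d\n", ring_idle());
        return 0;
}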
drivers/gpu/drm/i915/i915_gem_execbuffer.c  +18 −10

@@ -713,14 +713,14 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
        return ret;
 }
 
-static void
+static int
 i915_gem_execbuffer_flush(struct drm_device *dev,
                           uint32_t invalidate_domains,
                           uint32_t flush_domains,
                           uint32_t flush_rings)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int i;
+       int i, ret;
 
        if (flush_domains & I915_GEM_DOMAIN_CPU)
                intel_gtt_chipset_flush();

@@ -730,13 +730,19 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
        if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
                for (i = 0; i < I915_NUM_RINGS; i++)
-                       if (flush_rings & (1 << i))
-                               i915_gem_flush_ring(dev, &dev_priv->ring[i],
-                                                   invalidate_domains,
-                                                   flush_domains);
+                       if (flush_rings & (1 << i)) {
+                               ret = i915_gem_flush_ring(dev, &dev_priv->ring[i],
+                                                         invalidate_domains,
+                                                         flush_domains);
+                               if (ret)
+                                       return ret;
+                       }
        }
+
+       return 0;
 }
 
 static int
 i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
                                struct intel_ring_buffer *to)

@@ -798,10 +804,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                          cd.invalidate_domains,
                          cd.flush_domains);
 #endif
-               i915_gem_execbuffer_flush(ring->dev,
-                                         cd.invalidate_domains,
-                                         cd.flush_domains,
-                                         cd.flush_rings);
+               ret = i915_gem_execbuffer_flush(ring->dev,
+                                               cd.invalidate_domains,
+                                               cd.flush_domains,
+                                               cd.flush_rings);
+               if (ret)
+                       return ret;
        }
 
        list_for_each_entry(obj, objects, exec_list) {
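The execbuffer change applies the same rule inside a loop: flush each ring selected by the flush_rings mask and return the first error rather than continuing with the remaining rings. A small illustrative sketch (hypothetical names; -EIO is an arbitrary stand-in for whatever the flush reports):

/*
 * Sketch of the execbuffer-style loop: flush every ring whose bit is
 * set in a mask and stop at the first failure. Not driver code.
 */
#include <errno.h>
#include <stdio.h>

#define NUM_RINGS 3

static int flush_one(int ring)
{
        if (ring == 1)
                return -EIO;            /* simulate ring 1 failing */
        printf("ring %d flushed\n", ring);
        return 0;
}

static int flush_rings(unsigned int mask)
{
        int i, ret;

        for (i = 0; i < NUM_RINGS; i++) {
                if (mask & (1u << i)) {
                        ret = flush_one(i);
                        if (ret)
                                return ret;     /* first error wins */
                }
        }

        return 0;
}

int main(void)
{
        printf("result: %d\n", flush_rings(0x7u));      /* stops after ring 0 */
        return 0;
}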