drivers/gpu/drm/i915/i915_gem_context.c (+1 −1)

@@ -660,7 +660,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 					MI_STORE_REGISTER_MEM |
 					MI_SRM_LRM_GLOBAL_GTT);
 			intel_ring_emit_reg(ring, last_reg);
-			intel_ring_emit(ring, engine->scratch.gtt_offset);
+			intel_ring_emit(ring, engine->scratch->node.start);
 			intel_ring_emit(ring, MI_NOOP);
 		}
 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
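Note: the reads of engine->scratch in this and the following hunks only type-check against a matching change to struct intel_engine_cs in intel_ringbuffer.h, which this excerpt does not include. A minimal sketch of the assumed field change, reconstructed from the removed and added lines in these hunks:

/*
 * Before this patch (assumed shape, per the removed lines):
 *
 *	struct {
 *		struct drm_i915_gem_object *obj;
 *		u32 gtt_offset;
 *	} scratch;
 *
 * After: a single VMA pointer replaces both fields. Callers reach the
 * backing object through scratch->obj and the GGTT address through
 * scratch->node.start, which is exactly the substitution each hunk in
 * this diff performs.
 */
struct i915_vma *scratch;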
drivers/gpu/drm/i915/i915_gpu_error.c (+1 −1)

@@ -1101,7 +1101,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 			if (HAS_BROKEN_CS_TLB(dev_priv))
 				ee->wa_batchbuffer =
 					i915_error_ggtt_object_create(dev_priv,
-								      engine->scratch.obj);
+								      engine->scratch->obj);
 
 			if (request->ctx->engine[i].state) {
 				ee->ctx = i915_error_ggtt_object_create(dev_priv,
drivers/gpu/drm/i915/intel_display.c (+1 −1)

@@ -11795,7 +11795,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 		intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
 					      MI_SRM_LRM_GLOBAL_GTT);
 		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring, req->engine->scratch.gtt_offset + 256);
+		intel_ring_emit(ring, req->engine->scratch->node.start + 256);
 		if (IS_GEN8(dev)) {
 			intel_ring_emit(ring, 0);
 			intel_ring_emit(ring, MI_NOOP);
drivers/gpu/drm/i915/intel_lrc.c (+9 −9)

@@ -914,7 +914,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
 	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
 				   MI_SRM_LRM_GLOBAL_GTT));
 	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
-	wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
+	wa_ctx_emit(batch, index, engine->scratch->node.start + 256);
 	wa_ctx_emit(batch, index, 0);
 
 	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
@@ -932,7 +932,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
 	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
 				   MI_SRM_LRM_GLOBAL_GTT));
 	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
-	wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
+	wa_ctx_emit(batch, index, engine->scratch->node.start + 256);
 	wa_ctx_emit(batch, index, 0);
 
 	return index;
@@ -993,7 +993,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
 
 	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
 	/* Actual scratch location is at 128 bytes offset */
-	scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+	scratch_addr = engine->scratch->node.start + 2 * CACHELINE_BYTES;
 
 	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
 	wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1072,8 +1072,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
 	/* WaClearSlmSpaceAtContextSwitch:kbl */
 	/* Actual scratch location is at 128 bytes offset */
 	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
-		uint32_t scratch_addr =
-			engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+		u32 scratch_addr =
+			engine->scratch->node.start + 2 * CACHELINE_BYTES;
 
 		wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
 		wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1215,7 +1215,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	}
 
 	/* some WA perform writes to scratch page, ensure it is valid */
-	if (engine->scratch.obj == NULL) {
+	if (!engine->scratch) {
 		DRM_ERROR("scratch page not allocated for %s\n", engine->name);
 		return -EINVAL;
 	}
@@ -1483,7 +1483,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 {
 	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
-	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = engine->scratch->node.start + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
 	u32 flags = 0;
 	int ret;
@@ -1844,11 +1844,11 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 	else
 		engine->init_hw = gen8_init_render_ring;
 	engine->init_context = gen8_init_rcs_context;
-	engine->cleanup = intel_fini_pipe_control;
+	engine->cleanup = intel_engine_cleanup_scratch;
 	engine->emit_flush = gen8_emit_flush_render;
 	engine->emit_request = gen8_emit_request_render;
 
-	ret = intel_init_pipe_control(engine, 4096);
+	ret = intel_engine_create_scratch(engine, 4096);
 	if (ret)
 		return ret;
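Every emit path in these hunks narrows vma->node.start, a u64, into a u32 command-stream dword; that only holds while the scratch VMA is pinned in the global GTT, which stays within 32-bit reach. A hypothetical helper that would centralize and assert that truncation (not part of this patch; i915_ggtt_offset() is an illustrative name):

/*
 * Hypothetical: narrow a GGTT address to the 32 bits the command
 * streamer consumes, asserting nothing is lost. Shown only to make
 * the implicit u64 -> u32 truncation in the hunks above explicit.
 */
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
	GEM_BUG_ON(upper_32_bits(vma->node.start));
	return lower_32_bits(vma->node.start);
}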
drivers/gpu/drm/i915/intel_ringbuffer.c (+31 −24)

@@ -176,7 +176,7 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
 	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
-		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+		req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -212,7 +212,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
-		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+		req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
 	int ret;
@@ -286,7 +286,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
-		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+		req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
 	int ret;
@@ -370,7 +370,8 @@ gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 
 static int
 gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr =
+		req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
 	int ret;
@@ -612,45 +613,51 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	return ret;
 }
 
-void intel_fini_pipe_control(struct intel_engine_cs *engine)
+void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
 {
-	if (engine->scratch.obj == NULL)
+	struct i915_vma *vma;
+
+	vma = fetch_and_zero(&engine->scratch);
+	if (!vma)
 		return;
 
-	i915_gem_object_ggtt_unpin(engine->scratch.obj);
-	i915_gem_object_put(engine->scratch.obj);
-	engine->scratch.obj = NULL;
+	i915_vma_unpin(vma);
+	i915_vma_put(vma);
 }
 
-int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
+int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
 {
 	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	int ret;
 
-	WARN_ON(engine->scratch.obj);
+	WARN_ON(engine->scratch);
 
 	obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
 	if (!obj)
 		obj = i915_gem_object_create(&engine->i915->drm, size);
 	if (IS_ERR(obj)) {
 		DRM_ERROR("Failed to allocate scratch page\n");
-		ret = PTR_ERR(obj);
-		goto err;
+		return PTR_ERR(obj);
 	}
 
-	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, PIN_HIGH);
+	vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err_unref;
+	}
+
+	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
 	if (ret)
 		goto err_unref;
 
-	engine->scratch.obj = obj;
-	engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
-	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-			 engine->name, engine->scratch.gtt_offset);
+	engine->scratch = vma;
+	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08llx\n",
+			 engine->name, vma->node.start);
 	return 0;
 
 err_unref:
-	i915_gem_object_put(engine->scratch.obj);
-err:
+	i915_gem_object_put(obj);
 	return ret;
 }
@@ -1305,7 +1312,7 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
 		dev_priv->semaphore_obj = NULL;
 	}
 
-	intel_fini_pipe_control(engine);
+	intel_engine_cleanup_scratch(engine);
 }
 
 static int gen8_rcs_signal(struct drm_i915_gem_request *req)
@@ -1763,7 +1770,7 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
 			 unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
-	u32 cs_offset = req->engine->scratch.gtt_offset;
+	u32 cs_offset = req->engine->scratch->node.start;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -2793,11 +2800,11 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 		return ret;
 
 	if (INTEL_GEN(dev_priv) >= 6) {
-		ret = intel_init_pipe_control(engine, 4096);
+		ret = intel_engine_create_scratch(engine, 4096);
 		if (ret)
 			return ret;
 	} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
-		ret = intel_init_pipe_control(engine, I830_WA_SIZE);
+		ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
 		if (ret)
 			return ret;
 	}
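intel_engine_cleanup_scratch() takes ownership of the pointer with fetch_and_zero() before unpinning, so engine->scratch never dangles across the unwind. A sketch of that idiom as i915 defines it (my reading of the i915_drv.h macro of this era; a read-then-clear in one expression, not atomic):

/* Return the old value of *ptr and leave *ptr zeroed, so the caller
 * holds the only remaining reference to release.
 */
#define fetch_and_zero(ptr) ({			\
	typeof(*ptr) __T = *(ptr);		\
	*(ptr) = (typeof(*ptr))0;		\
	__T;					\
})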