
Commit f51455d4 authored by Chris Wilson

drm/i915: Replace 4096 with PAGE_SIZE or I915_GTT_PAGE_SIZE



Start converting over from the byte count to its semantic macro: either
we want to allocate the size of a physical page in main memory, or we
want the size of a virtual page in the GTT. 4096 could mean either, but
PAGE_SIZE and I915_GTT_PAGE_SIZE are explicit and should help improve
code comprehension and future changes. In the future, we may want to use
variable GTT page sizes, and so will face the challenge of knowing which
hardcoded values were used to represent a physical page vs a virtual
page.

v2: Look for a few more 4096s to convert, discover IS_ALIGNED().
v3: 4096ul paranoia, make fence alignment a distinct value of 4096, keep
bdw stolen w/a as 4096 until we know better.
v4: Add asserts that i915_vma_insert() start/end are aligned to GTT page
sizes.
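
For context: the new macros themselves land in i915_gem_gtt.h and do
not appear in the hunks below. Going by the "4096ul paranoia" note in
v3, they are presumably along these lines (a sketch of the assumed
definitions, not verbatim from the patch):

	/* A virtual page in the GTT, as distinct from PAGE_SIZE, which
	 * is a physical page in main memory. I915_GTT_MIN_ALIGNMENT is
	 * the smallest alignment the GTT allocator will accept.
	 */
	#define I915_GTT_PAGE_SIZE	4096UL
	#define I915_GTT_MIN_ALIGNMENT	I915_GTT_PAGE_SIZE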

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/20170110144734.26052-1-chris@chris-wilson.co.uk


Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent 2a20d6f8
+1 −1
@@ -3496,7 +3496,7 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 		return;
 
 	if (--vma->obj->pin_display == 0)
-		vma->display_alignment = 4096;
+		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
 	/* Bump the LRU to try and avoid premature eviction whilst flipping  */
 	if (!i915_vma_is_active(vma))
+4 −3
@@ -97,7 +97,7 @@
  * part. It should be safe to decrease this, but it's more future proof as is.
  */
 #define GEN6_CONTEXT_ALIGN (64<<10)
-#define GEN7_CONTEXT_ALIGN 4096
+#define GEN7_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
 
 static size_t get_context_alignment(struct drm_i915_private *dev_priv)
 {
@@ -341,7 +341,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
 		ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
 	else
-		ctx->ggtt_offset_bias = 4096;
+		ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
 
 	return ctx;
 
@@ -456,7 +456,8 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
 		dev_priv->hw_context_size = 0;
 	} else if (HAS_HW_CONTEXTS(dev_priv)) {
 		dev_priv->hw_context_size =
-			round_up(get_context_size(dev_priv), 4096);
+			round_up(get_context_size(dev_priv),
+				 I915_GTT_PAGE_SIZE);
 		if (dev_priv->hw_context_size > (1<<20)) {
 			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
 					 dev_priv->hw_context_size);
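
The round_up() in the last hunk is the standard kernel helper from
include/linux/kernel.h: it rounds its first argument up to the next
multiple of its power-of-two second argument, so an arbitrary hardware
context size becomes a whole number of GTT pages. Roughly:

	/* Power-of-two round-up: e.g. round_up(5000, 4096) == 8192,
	 * while an exact multiple is returned unchanged.
	 * (Sketch of the kernel macro, shown here for reference.)
	 */
	#define round_up(x, y) ((((x) - 1) | ((__typeof__(x))((y) - 1))) + 1)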
+2 −2
@@ -264,9 +264,9 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
 	if (check_color) {
 		/* Expand search to cover neighbouring guard pages (or lack!) */
 		if (start > target->vm->start)
-			start -= 4096;
+			start -= I915_GTT_PAGE_SIZE;
 		if (end < target->vm->start + target->vm->total)
-			end += 4096;
+			end += I915_GTT_PAGE_SIZE;
 	}
 
 	drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) {
+2 −3
@@ -438,7 +438,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 			memset(&cache->node, 0, sizeof(cache->node));
 			ret = drm_mm_insert_node_in_range_generic
 				(&ggtt->base.mm, &cache->node,
-				 4096, 0, I915_COLOR_UNEVICTABLE,
+				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
 				 0, ggtt->mappable_end,
 				 DRM_MM_SEARCH_DEFAULT,
 				 DRM_MM_CREATE_DEFAULT);
@@ -851,8 +851,7 @@ eb_vma_misplaced(struct i915_vma *vma)
 	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
 		!i915_vma_is_ggtt(vma));
 
-	if (entry->alignment &&
-	    vma->node.start & (entry->alignment - 1))
+	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
 		return true;
 
 	if (vma->node.size < entry->pad_to_size)
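
IS_ALIGNED() -- the v2 "discovery" from the changelog -- is the generic
helper from include/linux/kernel.h, and it compiles to exactly the
open-coded mask test it replaces here:

	/* True when x is a multiple of the power-of-two a; identical to
	 * the previous (x & (a - 1)) == 0 test, but self-documenting.
	 */
	#define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)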
+6 −6
@@ -80,11 +80,11 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
 		unsigned int stride = i915_gem_object_get_stride(vma->obj);
 
 		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
-		GEM_BUG_ON(vma->node.start & 4095);
-		GEM_BUG_ON(vma->fence_size & 4095);
-		GEM_BUG_ON(stride & 127);
+		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
+		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
+		GEM_BUG_ON(!IS_ALIGNED(stride, 128));
 
-		val = (vma->node.start + vma->fence_size - 4096) << 32;
+		val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
 		val |= vma->node.start;
 		val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
 		if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
@@ -127,7 +127,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
 		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
 		GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
 		GEM_BUG_ON(!is_power_of_2(vma->fence_size));
-		GEM_BUG_ON(vma->node.start & (vma->fence_size - 1));
+		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
 
 		if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
 			stride /= 128;
@@ -166,7 +166,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
 		GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
 		GEM_BUG_ON(!is_power_of_2(vma->fence_size));
 		GEM_BUG_ON(!is_power_of_2(stride / 128));
-		GEM_BUG_ON(vma->node.start & (vma->fence_size - 1));
+		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
 
 		val = vma->node.start;
 		if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
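
Note that the fence asserts use a new I965_FENCE_PAGE rather than
I915_GTT_PAGE_SIZE: per the v3 note above, fence alignment is a
property of the fence hardware, not of the GTT page size, so it is kept
as a distinct 4096. Presumably something like:

	/* Fence registers always operate on 4K granules, independent of
	 * the GTT page size (assumed definition, local to the fence code).
	 */
	#define I965_FENCE_PAGE	4096UL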