Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c8e0f93a authored by Eric Anholt
Browse files

drm/i915: Replace a calloc followed by copying data over it with malloc.



Execbufs involve quite a bit of payload, to the extent that cache misses
show up in the profiles here, and a suspicion that some of those cachelines
may get evicted and then reloaded in the subsequent copy.

This is still abstracted like drm_calloc_large since we want to check for
size overflow, and because we want to choose between kmalloc and vmalloc
on the fly.  cairo's interface for malloc-with-calloc's-args was used as
the model.

Signed-off-by: Eric Anholt <eric@anholt.net>
parent 5b8f0be0
Loading
Loading
Loading
Loading
+2 −2
Original line number Original line Diff line number Diff line
@@ -3563,8 +3563,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
		return -EINVAL;
		return -EINVAL;
	}
	}
	/* Copy in the exec list from userland */
	/* Copy in the exec list from userland */
	exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
	if (exec_list == NULL || object_list == NULL) {
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  "for %d buffers\n",
+14 −1
Original line number Original line Diff line number Diff line
@@ -1545,14 +1545,27 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)


/*
 * drm_calloc_large - allocate a zero-filled array, sized for large requests.
 *
 * Checks that nmemb * size cannot overflow before multiplying, then picks
 * the allocator: kcalloc() when the request fits in one page, otherwise
 * __vmalloc() with __GFP_ZERO so the result is zeroed either way.
 *
 * Returns NULL on overflow or allocation failure.  The result must be
 * released with drm_free_large().
 */
static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
{
	/* Reject requests whose byte count would wrap around. */
	if (size != 0 && nmemb > ULONG_MAX / size)
		return NULL;

	if (size * nmemb <= PAGE_SIZE)
		return kcalloc(nmemb, size, GFP_KERNEL);

	/* Too big for kmalloc-style allocation; __GFP_ZERO keeps the
	 * zero-fill semantics consistent with the kcalloc() path. */
	return __vmalloc(size * nmemb,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
}

/*
 * drm_malloc_ab - like drm_calloc_large(), but without the zeroing.
 *
 * Modeled after cairo's malloc_ab: it takes calloc-style (nmemb, size)
 * arguments purely so the multiplication-overflow check can live here,
 * while skipping the zero-fill for callers that immediately overwrite
 * the whole buffer (e.g. a copy_from_user() of an execbuffer list).
 *
 * Returns NULL on overflow or allocation failure.  The result must be
 * released with drm_free_large().
 */
static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
{
	/* Reject requests whose byte count would wrap around. */
	if (size != 0 && nmemb > ULONG_MAX / size)
		return NULL;

	if (size * nmemb <= PAGE_SIZE)
		return kmalloc(nmemb * size, GFP_KERNEL);

	/* Deliberately no __GFP_ZERO: the caller is expected to fill
	 * the memory before reading it. */
	return __vmalloc(size * nmemb,
			 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}


static __inline void drm_free_large(void *ptr)
static __inline void drm_free_large(void *ptr)