
Commit a91576d7 authored by Tetsuo Handa, committed by Dave Airlie

drm/ttm: Pass GFP flags in order to avoid deadlock.



Commit 7dc19d5a ("drivers: convert shrinkers to new count/scan API") added
deadlock warnings noting that ttm_page_pool_free() and ttm_dma_page_pool_free()
currently do GFP_KERNEL allocation, but these functions were never updated to
take a gfp_t argument.

This patch explicitly passes sc->gfp_mask or GFP_KERNEL to these functions
and removes the deadlock warnings.

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: stable <stable@kernel.org> [2.6.35+]
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 71336e01
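
The rule this patch enforces is general to the count/scan shrinker API: scan_objects() runs inside memory reclaim, so any allocation made on that path must honour the GFP context supplied in struct shrink_control rather than hard-coding GFP_KERNEL, which can recurse into reclaim (for example when the triggering allocation was GFP_NOFS or GFP_NOIO) and deadlock. A minimal sketch of the pattern, with hypothetical names (demo_pool, demo_pool_free, demo_shrink_scan) standing in for the TTM code:

#include <linux/gfp.h>
#include <linux/shrinker.h>
#include <linux/slab.h>

/* Hypothetical page pool; stands in for struct ttm_page_pool. */
static struct demo_pool {
	unsigned long npages;
} demo_pool;

/*
 * Free up to nr_free pages from the pool. The scratch array is
 * allocated with the caller-supplied gfp mask, never a hard-coded
 * GFP_KERNEL, so this is safe to call from reclaim context.
 * Returns the number of pages that could not be freed.
 */
static unsigned long demo_pool_free(struct demo_pool *pool,
				    unsigned long nr_free, gfp_t gfp)
{
	struct page **pages_to_free;

	pages_to_free = kmalloc(nr_free * sizeof(struct page *), gfp);
	if (!pages_to_free)
		return nr_free;			/* nothing freed */
	/* ... detach up to nr_free pages under the pool lock, release them ... */
	kfree(pages_to_free);
	return 0;				/* everything requested was freed */
}

/* scan_objects callback: thread sc->gfp_mask down the call chain. */
static unsigned long demo_shrink_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	unsigned long remaining;

	remaining = demo_pool_free(&demo_pool, sc->nr_to_scan, sc->gfp_mask);
	return sc->nr_to_scan - remaining;	/* objects actually freed */
}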
drivers/gpu/drm/ttm/ttm_page_alloc.c +10 −9
@@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
  *
  * @pool: to free the pages from
  * @free_all: If set to true will free all pages in pool
+ * @gfp: GFP flags.
  **/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+			      gfp_t gfp)
 {
 	unsigned long irq_flags;
 	struct page *p;
@@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
 	if (NUM_PAGES_TO_ALLOC < nr_free)
 		npages_to_free = NUM_PAGES_TO_ALLOC;
 
-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
-			GFP_KERNEL);
+	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
 	if (!pages_to_free) {
 		pr_err("Failed to allocate memory for pool free operation\n");
 		return 0;
@@ -382,9 +383,7 @@ out:
  *
  * XXX: (dchinner) Deadlock warning!
  *
- * ttm_page_pool_free() does memory allocation using GFP_KERNEL.  that means
- * this can deadlock when called a sc->gfp_mask that is not equal to
- * GFP_KERNEL.
+ * We need to pass sc->gfp_mask to ttm_page_pool_free().
  *
  * This code is crying out for a shrinker per pool....
  */
@@ -408,7 +407,8 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		if (shrink_pages == 0)
 			break;
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
-		shrink_pages = ttm_page_pool_free(pool, nr_free);
+		shrink_pages = ttm_page_pool_free(pool, nr_free,
+						  sc->gfp_mask);
 		freed += nr_free - shrink_pages;
 	}
 	mutex_unlock(&lock);
@@ -710,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 	if (npages)
-		ttm_page_pool_free(pool, npages);
+		ttm_page_pool_free(pool, npages, GFP_KERNEL);
 }
 
 /*
@@ -850,7 +850,8 @@ void ttm_page_alloc_fini(void)
 	ttm_pool_mm_shrink_fini(_manager);
 
 	for (i = 0; i < NUM_POOLS; ++i)
-		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
+				   GFP_KERNEL);
 
 	kobject_put(&_manager->kobj);
 	_manager = NULL;
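
For context, sc->gfp_mask is filled in by the core shrinker machinery that commit 7dc19d5a converted these pools to: it carries the GFP mask of the allocation that triggered reclaim. Call sites that run outside reclaim, such as ttm_put_pages() and ttm_page_alloc_fini() above, are still allowed to block, which is why the patch passes GFP_KERNEL there explicitly. A sketch of the registration side of that era's count/scan API, reusing the hypothetical demo_* names from the sketch above (register_shrinker() later grew extra arguments, so this matches 3.x-era kernels only):

/* count_objects callback: report how many objects are reclaimable. */
static unsigned long demo_shrink_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	return demo_pool.npages;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_shrink_count,
	.scan_objects	= demo_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
	return register_shrinker(&demo_shrinker);
}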
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +9 −10
@@ -411,8 +411,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
  *
  * @pool: to free the pages from
  * @nr_free: If set to true will free all pages in pool
+ * @gfp: GFP flags.
  **/
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+				       gfp_t gfp)
 {
 	unsigned long irq_flags;
 	struct dma_page *dma_p, *tmp;
@@ -430,8 +432,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
 			 npages_to_free, nr_free);
 	}
 #endif
-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
-			GFP_KERNEL);
+	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
 
 	if (!pages_to_free) {
 		pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -530,7 +531,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
 		if (pool->type != type)
 			continue;
 		/* Takes a spinlock.. */
-		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
 		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
 		/* This code path is called after _all_ references to the
 		 * struct device has been dropped - so nobody should be
@@ -983,7 +984,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 
 	/* shrink pool if necessary (only on !is_cached pools)*/
 	if (npages)
-		ttm_dma_page_pool_free(pool, npages);
+		ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
 	ttm->state = tt_unpopulated;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -993,10 +994,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
  *
  * XXX: (dchinner) Deadlock warning!
  *
- * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
- * needs to be paid to sc->gfp_mask to determine if this can be done or not.
- * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
- * bad.
+ * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
  *
  * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
  * shrinkers
@@ -1030,7 +1028,8 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		if (++idx < pool_offset)
 			continue;
 		nr_free = shrink_pages;
-		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
+						      sc->gfp_mask);
 		freed += nr_free - shrink_pages;
 
 		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",