
Commit 115a5c2b authored by Dave Airlie

Merge remote branch 'korg/drm-radeon-next' of into drm-linus

This merges some TTM overhauls to allow us to do better object placement
for certain radeon GPUs that need scanout+cursor within range of each other,
along with an API change to not return ERESTART to userspace, but to use
ERESTARTSYS properly internally and have it convert to EINTR and catch that
correctly. Also lots of radeon fixes across the board.
parents 0b5e8db6 fb53f862
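
For the interruption change described above, here is a hedged userspace-side sketch (not part of this merge): with waits inside the kernel now returning -ERESTARTSYS internally, the signal code either restarts the call transparently or hands a standard EINTR back to userspace, so callers can use the ordinary retry idiom. The fd/request/arg names are placeholders.

#include <errno.h>
#include <sys/ioctl.h>

/* Illustrative only: retry an interrupted DRM ioctl now that the kernel
 * reports a standard EINTR instead of the non-standard ERESTART. */
static int drm_ioctl_retry(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret == -1 && errno == EINTR);

	return ret;
}
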
drivers/gpu/drm/drm_crtc_helper.c  +3 −0
@@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
{
	int count = 0;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	drm_fb_helper_parse_command_line(dev);

	count = drm_helper_probe_connector_modes(dev,
drivers/gpu/drm/drm_mm.c  +108 −0
@@ -226,6 +226,44 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = ((node->start + wasted) % alignment);

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/*
 * Put a block. Merge with the previous and / or next block if they are free.
 * Otherwise add to the free stack.
@@ -331,6 +369,56 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
}
EXPORT_SYMBOL(drm_mm_search_free);

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (entry->start > end || (entry->start+entry->size) < start)
			continue;

		if (entry->start < start)
			wasted += start - entry->start;

		if (alignment) {
			register unsigned tmp = (entry->start + wasted) % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);

int drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->ml_entry;
@@ -381,6 +469,26 @@ void drm_mm_takedown(struct drm_mm * mm)
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	int total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
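
The drm_mm additions above export a search/get pair for constraining an allocation to a sub-range of a memory manager, which is what the scanout+cursor placement mentioned in the commit message relies on. A minimal sketch of how a caller might combine them, assuming a driver file that already pulls in the drm_mm declarations; the wrapper name and window bounds are hypothetical, not from this patch:

/* Illustrative caller only: allocate "size" bytes, "alignment"-aligned,
 * somewhere inside [start, end) of the given manager.  The caller is
 * assumed to hold whatever lock protects the drm_mm. */
static struct drm_mm_node *alloc_in_range(struct drm_mm *mm,
					  unsigned long size,
					  unsigned alignment,
					  unsigned long start,
					  unsigned long end)
{
	struct drm_mm_node *free_node;

	/* Find a free block able to satisfy the request within the window. */
	free_node = drm_mm_search_free_in_range(mm, size, alignment,
						start, end, 1);
	if (!free_node)
		return NULL;

	/* Split the actual allocation out of that free block. */
	return drm_mm_get_block_range_generic(free_node, size, alignment,
					      start, end, 0);
}
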
drivers/gpu/drm/radeon/atombios_crtc.c  +15 −4
@@ -499,6 +499,16 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
	else
		pll = &rdev->clock.p2pll;

	if (ASIC_IS_AVIVO(rdev)) {
		if (radeon_new_pll)
			radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
						 &fb_div, &frac_fb_div,
						 &ref_div, &post_div, pll_flags);
		else
			radeon_compute_pll(pll, adjusted_clock, &pll_clock,
					   &fb_div, &frac_fb_div,
					   &ref_div, &post_div, pll_flags);
	} else
		radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
				   &ref_div, &post_div, pll_flags);

@@ -599,8 +609,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
	}
	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
	radeon_bo_unreserve(rbo);
	if (tiling_flags & RADEON_TILING_MACRO)
		fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;

	switch (crtc->fb->bits_per_pixel) {
	case 8:
@@ -630,6 +638,9 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
		return -EINVAL;
	}

	if (tiling_flags & RADEON_TILING_MACRO)
		fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;

	if (tiling_flags & RADEON_TILING_MICRO)
		fb_format |= AVIVO_D1GRPH_TILED;

drivers/gpu/drm/radeon/r100.c  +2 −0
@@ -3299,6 +3299,8 @@ int r100_resume(struct radeon_device *rdev)
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r100_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return r100_startup(rdev);
}

drivers/gpu/drm/radeon/r300.c  +2 −0
@@ -1250,6 +1250,8 @@ int r300_resume(struct radeon_device *rdev)
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return r300_startup(rdev);
}
