Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 584ef2cd authored by Linus Torvalds
Browse files
* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/radeon/kms: balance asic_reset functions
  drm/radeon/kms: remove duplicate card_posted() functions
  drm/radeon/kms: add module option for pcie gen2
  drm/radeon/kms: fix typo in evergreen safe reg
  drm/nouveau: fix gpu page faults triggered by plymouthd
  drm/nouveau: greatly simplify mm, killing some bugs in the process
  drm/nvc0: enable protection of system-use-only structures in vm
  drm/nv40: initialise 0x17xx on all chipsets that have it
  drm/nv40: make detection of 0x4097-ful chipsets available everywhere
parents e1288cd7 25b2ec5b
Loading
Loading
Loading
Loading
+15 −0
Original line number Diff line number Diff line
@@ -160,6 +160,7 @@ enum nouveau_flags {
#define NVOBJ_FLAG_ZERO_ALLOC		(1 << 1)
#define NVOBJ_FLAG_ZERO_FREE		(1 << 2)
#define NVOBJ_FLAG_VM			(1 << 3)
#define NVOBJ_FLAG_VM_USER		(1 << 4)

#define NVOBJ_CINST_GLOBAL	0xdeadbeef

@@ -1576,6 +1577,20 @@ nv_match_device(struct drm_device *dev, unsigned device,
		dev->pdev->subsystem_device == sub_device;
}

/* returns 1 if device is one of the nv4x using the 0x4497 object class,
 * helpful to determine a number of other hardware features
 */
static inline int
nv44_graph_class(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if ((dev_priv->chipset & 0xf0) == 0x60)
		return 1;

	return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
}

/* memory type/access flags, do not match hardware values */
#define NV_MEM_ACCESS_RO  1
#define NV_MEM_ACCESS_WO  2
+2 −2
Original line number Diff line number Diff line
@@ -352,8 +352,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
			      FBINFO_HWACCEL_IMAGEBLIT;
	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &nouveau_fbcon_sw_ops;
	info->fix.smem_start = dev->mode_config.fb_base +
			       (nvbo->bo.mem.start << PAGE_SHIFT);
	info->fix.smem_start = nvbo->bo.mem.bus.base +
			       nvbo->bo.mem.bus.offset;
	info->fix.smem_len = size;

	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
+10 −16
Original line number Diff line number Diff line
@@ -742,30 +742,24 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
	int i;
	u32 total = 0, free = 0;

	mutex_lock(&mm->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
		       prefix, r->free ? "free" : "used", r->type,
		       ((u64)r->offset << 12),
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		ttotal[r->type] += r->length;
		if (r->free)
			tfree[r->type] += r->length;
		else
			tused[r->type] += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&mm->mutex);

	printk(KERN_DEBUG "%s  total: 0x%010llx\n", prefix, total << 12);
	for (i = 0; i < 3; i++) {
		printk(KERN_DEBUG "%s type %d: 0x%010llx, "
				  "used 0x%010llx, free 0x%010llx\n", prefix,
		       i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
	}
	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s  block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+41 −141
Original line number Diff line number Diff line
@@ -48,175 +48,76 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)

	b->offset = a->offset;
	b->length = size;
	b->free   = a->free;
	b->type   = a->type;
	a->offset += size;
	a->length -= size;
	list_add_tail(&b->nl_entry, &a->nl_entry);
	if (b->free)
	if (b->type == 0)
		list_add_tail(&b->fl_entry, &a->fl_entry);
	return b;
}

static struct nouveau_mm_node *
nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)

void
nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
{
	struct nouveau_mm_node *prev, *next;
	struct nouveau_mm_node *prev = node(this, prev);
	struct nouveau_mm_node *next = node(this, next);

	/* try to merge with free adjacent entries of same type */
	prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
	if (this->nl_entry.prev != &rmm->nodes) {
		if (prev->free && prev->type == this->type) {
	list_add(&this->fl_entry, &rmm->free);
	this->type = 0;

	if (prev && prev->type == 0) {
		prev->length += this->length;
		region_put(rmm, this);
		this = prev;
	}
	}

	next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
	if (this->nl_entry.next != &rmm->nodes) {
		if (next->free && next->type == this->type) {
	if (next && next->type == 0) {
		next->offset  = this->offset;
		next->length += this->length;
		region_put(rmm, this);
			this = next;
	}
}

	return this;
}

void
nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
{
	u32 block_s, block_l;

	this->free = true;
	list_add(&this->fl_entry, &rmm->free);
	this = nouveau_mm_merge(rmm, this);

	/* any entirely free blocks now?  we'll want to remove typing
	 * on them now so they can be use for any memory allocation
	 */
	block_s = roundup(this->offset, rmm->block_size);
	if (block_s + rmm->block_size > this->offset + this->length)
		return;

	/* split off any still-typed region at the start */
	if (block_s != this->offset) {
		if (!region_split(rmm, this, block_s - this->offset))
			return;
	}

	/* split off the soon-to-be-untyped block(s) */
	block_l = rounddown(this->length, rmm->block_size);
	if (block_l != this->length) {
		this = region_split(rmm, this, block_l);
		if (!this)
			return;
	}

	/* mark as having no type, and retry merge with any adjacent
	 * untyped blocks
	 */
	this->type = 0;
	nouveau_mm_merge(rmm, this);
}

int
nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
	       u32 align, struct nouveau_mm_node **pnode)
{
	struct nouveau_mm_node *this, *tmp, *next;
	u32 splitoff, avail, alloc;

	list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
		next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
		if (this->nl_entry.next == &rmm->nodes)
			next = NULL;

		/* skip wrongly typed blocks */
		if (this->type && this->type != type)
	struct nouveau_mm_node *prev, *this, *next;
	u32 min = size_nc ? size_nc : size;
	u32 align_mask = align - 1;
	u32 splitoff;
	u32 s, e;

	list_for_each_entry(this, &rmm->free, fl_entry) {
		e = this->offset + this->length;
		s = this->offset;

		prev = node(this, prev);
		if (prev && prev->type != type)
			s = roundup(s, rmm->block_size);

		next = node(this, next);
		if (next && next->type != type)
			e = rounddown(e, rmm->block_size);

		s  = (s + align_mask) & ~align_mask;
		e &= ~align_mask;
		if (s > e || e - s < min)
			continue;

		/* account for alignment */
		splitoff = this->offset & (align - 1);
		if (splitoff)
			splitoff = align - splitoff;

		if (this->length <= splitoff)
			continue;

		/* determine total memory available from this, and
		 * the next block (if appropriate)
		 */
		avail = this->length;
		if (next && next->free && (!next->type || next->type == type))
			avail += next->length;

		avail -= splitoff;

		/* determine allocation size */
		if (size_nc) {
			alloc = min(avail, size);
			alloc = rounddown(alloc, size_nc);
			if (alloc == 0)
				continue;
		} else {
			alloc = size;
			if (avail < alloc)
				continue;
		}

		/* untyped block, split off a chunk that's a multiple
		 * of block_size and type it
		 */
		if (!this->type) {
			u32 block = roundup(alloc + splitoff, rmm->block_size);
			if (this->length < block)
				continue;
		splitoff = s - this->offset;
		if (splitoff && !region_split(rmm, this, splitoff))
			return -ENOMEM;

			this = region_split(rmm, this, block);
		this = region_split(rmm, this, min(size, e - s));
		if (!this)
			return -ENOMEM;

		this->type = type;
		}

		/* stealing memory from adjacent block */
		if (alloc > this->length) {
			u32 amount = alloc - (this->length - splitoff);

			if (!next->type) {
				amount = roundup(amount, rmm->block_size);

				next = region_split(rmm, next, amount);
				if (!next)
					return -ENOMEM;

				next->type = type;
			}

			this->length += amount;
			next->offset += amount;
			next->length -= amount;
			if (!next->length) {
				list_del(&next->nl_entry);
				list_del(&next->fl_entry);
				kfree(next);
			}
		}

		if (splitoff) {
			if (!region_split(rmm, this, splitoff))
				return -ENOMEM;
		}

		this = region_split(rmm, this, alloc);
		if (this == NULL)
			return -ENOMEM;

		this->free = false;
		list_del(&this->fl_entry);
		*pnode = this;
		return 0;
@@ -234,7 +135,6 @@ nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return -ENOMEM;
	heap->free = true;
	heap->offset = roundup(offset, block);
	heap->length = rounddown(offset + length, block) - heap->offset;

+1 −3
Original line number Diff line number Diff line
@@ -30,9 +30,7 @@ struct nouveau_mm_node {
	struct list_head fl_entry;
	struct list_head rl_entry;

	bool free;
	int  type;

	u8  type;
	u32 offset;
	u32 length;
};
Loading