Commit 9ce523cc authored by Ben Skeggs

drm/nouveau: separate buffer object backing memory from nvkm structures



Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent cb7e88e7
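
Throughout the diff below, call sites stop treating ttm_mem_reg::mm_node as a raw struct nvkm_mem pointer and instead go through a nouveau_mem() accessor from the new nouveau_mem.h header, which is not part of this view. Judging by nouveau_mem_new() further down, which stores the wrapper in reg->mm_node, the accessor is presumably just a thin cast, along these lines:

	/* Assumed shape of the accessor (nouveau_mem.h is not shown in this
	 * view): nouveau_mem_new() stores the wrapper in ttm_mem_reg::mm_node,
	 * so the lookup is presumably a plain pointer conversion. */
	static inline struct nouveau_mem *
	nouveau_mem(struct ttm_mem_reg *reg)
	{
		return reg->mm_node;
	}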
drivers/gpu/drm/nouveau/Kbuild +1 −0
@@ -30,6 +30,7 @@ nouveau-y += nouveau_vga.o
 # DRM - memory management
 nouveau-y += nouveau_bo.o
 nouveau-y += nouveau_gem.o
+nouveau-y += nouveau_mem.o
 nouveau-y += nouveau_prime.o
 nouveau-y += nouveau_sgdma.o
 nouveau-y += nouveau_ttm.o
drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h +0 −6
@@ -22,12 +22,6 @@
 #define NV_MEM_COMP_VM 0x03
 
 struct nvkm_mem {
-	struct drm_device *dev;
-
-	struct nvkm_vma bar_vma;
-	struct nvkm_vma vma[2];
-	u8  page_shift;
-
 	struct nvkm_mm_node *tag;
 	struct nvkm_mm_node *mem;
 	dma_addr_t *pages;
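
The fields dropped from struct nvkm_mem here (dev, bar_vma, vma[2], page_shift) do not disappear: the call sites below reach them through the new struct nouveau_mem wrapper instead. The wrapper's real definition lives in nouveau_mem.h, outside this view; a sketch inferred from the accesses in the diffs that follow:

	/* Inferred sketch of struct nouveau_mem (the real definition is in
	 * nouveau_mem.h, not shown here; field order and exact types are
	 * assumptions based on the usage below): */
	struct nouveau_mem {
		struct nouveau_cli *cli;   /* set by nouveau_mem_new() */
		u8 kind;                   /* storage type, replaces nvkm_mem.memtype checks */
		u8 comp;                   /* compression setting */
		struct {
			u8 page;           /* page shift, replaces nvkm_mem.page_shift */
		} mem;
		struct nvkm_mem *_mem;     /* backing nvkm allocation */
		struct nvkm_mem __mem;     /* inline storage used by nouveau_mem_host() */
		struct nvkm_vma vma[2];    /* migration mappings, moved out of nvkm_mem */
		struct nvkm_vma bar_vma;   /* BAR1 mapping, moved out of nvkm_mem */
	};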
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h +4 −1
@@ -20,7 +20,10 @@ struct nvkm_vma {
 	int refcount;
 	struct nvkm_vm *vm;
 	struct nvkm_mm_node *node;
-	u64 offset;
+	union {
+		u64 offset;
+		u64 addr;
+	};
 	u32 access;
 };
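
Wrapping offset in an anonymous union with addr appears to let existing nvkm_vma users keep compiling while new code (such as the copy paths below) migrates to the addr name. A standalone illustration of that aliasing, not kernel code:

	/* Standalone demo: both union members name the same 64-bit storage. */
	#include <assert.h>
	#include <stdint.h>

	struct vma_demo {
		union {
			uint64_t offset; /* legacy name */
			uint64_t addr;   /* new name */
		};
	};

	int main(void)
	{
		struct vma_demo v = { .addr = 0x1000 };
		assert(v.offset == v.addr); /* same storage, two names */
		return 0;
	}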

drivers/gpu/drm/nouveau/nouveau_bo.c +61 −50
@@ -37,6 +37,7 @@
 #include "nouveau_bo.h"
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
+#include "nouveau_mem.h"
 
 /*
  * NV10-NV40 tiling helpers
@@ -670,14 +671,14 @@ static int
 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	int ret = RING_SPACE(chan, 10);
 	if (ret == 0) {
 		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
-		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
+		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
 		OUT_RING  (chan, PAGE_SIZE);
 		OUT_RING  (chan, PAGE_SIZE);
 		OUT_RING  (chan, PAGE_SIZE);
@@ -702,9 +703,9 @@ static int
 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
 	u32 page_count = new_reg->num_pages;
 	int ret;
 
@@ -740,9 +741,9 @@ static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
 	u32 page_count = new_reg->num_pages;
 	int ret;
 
@@ -779,9 +780,9 @@ static int
 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
 	u32 page_count = new_reg->num_pages;
 	int ret;
 
@@ -817,14 +818,14 @@ static int
 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
-		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
+		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
 		OUT_RING  (chan, 0x00000000 /* COPY */);
 		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
 	}
@@ -835,15 +836,15 @@ static int
 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
 		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
-		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
+		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
 		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
 	}
 	return ret;
@@ -869,12 +870,12 @@ static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	u64 length = (new_reg->num_pages << PAGE_SHIFT);
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
-	int src_tiled = !!mem->memtype;
-	int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
+	int src_tiled = !!mem->kind;
+	int dst_tiled = !!nouveau_mem(new_reg)->kind;
 	int ret;
 
 	while (length) {
@@ -1011,25 +1012,34 @@ static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 		     struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *old_mem = bo->mem.mm_node;
-	struct nvkm_mem *new_mem = reg->mm_node;
+	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+	struct nouveau_mem *new_mem = nouveau_mem(reg);
+	struct nvkm_vm *vmm = drm->client.vm;
 	u64 size = (u64)reg->num_pages << PAGE_SHIFT;
 	int ret;
 
-	ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
-			  NV_MEM_ACCESS_RW, &old_mem->vma[0]);
+	ret = nvkm_vm_get(vmm, size, old_mem->mem.page, NV_MEM_ACCESS_RW,
+			  &old_mem->vma[0]);
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
-			  NV_MEM_ACCESS_RW, &old_mem->vma[1]);
+	ret = nvkm_vm_get(vmm, size, new_mem->mem.page, NV_MEM_ACCESS_RW,
+			  &old_mem->vma[1]);
 	if (ret) {
 		nvkm_vm_put(&old_mem->vma[0]);
 		return ret;
 	}
 
-	nvkm_vm_map(&old_mem->vma[0], old_mem);
-	nvkm_vm_map(&old_mem->vma[1], new_mem);
+	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
+	if (ret)
+		goto done;
+
+	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
+done:
+	if (ret) {
+		nvkm_vm_put(&old_mem->vma[1]);
+		nvkm_vm_put(&old_mem->vma[0]);
+	}
 	return 0;
 }

@@ -1211,8 +1221,8 @@ static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
 		     struct ttm_mem_reg *new_reg)
 {
+	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nvkm_mem *mem = new_reg ? new_reg->mm_node : NULL;
 	struct nvkm_vma *vma;
 
 	/* ttm can now (stupidly) pass the driver bos it didn't create... */
@@ -1220,9 +1230,9 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
 		return;
 
 	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
-	    mem->page_shift == nvbo->page) {
+	    mem->mem.page == nvbo->page) {
 		list_for_each_entry(vma, &nvbo->vma_list, head) {
-			nvkm_vm_map(vma, mem);
+			nvkm_vm_map(vma, mem->_mem);
 		}
 	} else {
 		list_for_each_entry(vma, &nvbo->vma_list, head) {
@@ -1343,7 +1353,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 	struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
 	struct nvkm_device *device = nvxx_device(&drm->client.device);
-	struct nvkm_mem *mem = reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(reg);
 	int ret;
 
 	reg->bus.addr = NULL;
@@ -1365,7 +1375,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 			reg->bus.is_iomem = !drm->agp.cma;
 		}
 #endif
-		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype)
+		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->kind)
 			/* untiled */
 			break;
 		/* fallthrough, tiled memory */
@@ -1377,14 +1387,15 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 			struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
 			int page_shift = 12;
 			if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
-				page_shift = mem->page_shift;
+				page_shift = mem->mem.page;
 
-			ret = nvkm_vm_get(bar, mem->size << 12, page_shift,
-					  NV_MEM_ACCESS_RW, &mem->bar_vma);
+			ret = nvkm_vm_get(bar, mem->_mem->size << 12,
+					  page_shift, NV_MEM_ACCESS_RW,
+					  &mem->bar_vma);
 			if (ret)
 				return ret;
 
-			nvkm_vm_map(&mem->bar_vma, mem);
+			nvkm_vm_map(&mem->bar_vma, mem->_mem);
 			reg->bus.offset = mem->bar_vma.offset;
 		}
 		break;
@@ -1397,7 +1408,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *mem = reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(reg);
 
 	if (!mem->bar_vma.node)
 		return;
@@ -1606,7 +1617,7 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
 		   struct nvkm_vma *vma)
 {
 	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-	struct nvkm_mem *mem = nvbo->bo.mem.mm_node;
+	struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
 	int ret;
 
 	ret = nvkm_vm_get(vm, size, nvbo->page, NV_MEM_ACCESS_RW, vma);
@@ -1614,8 +1625,8 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
 		return ret;
 
 	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
-	    mem->page_shift == nvbo->page)
-		nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
+	    mem->mem.page == nvbo->page)
+		nvkm_vm_map(vma, mem->_mem);
 
 	list_add_tail(&vma->head, &nvbo->vma_list);
 	vma->refcount = 1;
drivers/gpu/drm/nouveau/nouveau_mem.c +114 −0
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_mem.h"
#include "nouveau_drv.h"
#include "nouveau_bo.h"

#include <drm/ttm/ttm_bo_driver.h>

int
nouveau_mem_map(struct nouveau_mem *mem,
		struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
	nvkm_vm_map(vma, mem->_mem);
	return 0;
}

void
nouveau_mem_fini(struct nouveau_mem *mem)
{
	if (mem->vma[1].node) {
		nvkm_vm_unmap(&mem->vma[1]);
		nvkm_vm_put(&mem->vma[1]);
	}
	if (mem->vma[0].node) {
		nvkm_vm_unmap(&mem->vma[0]);
		nvkm_vm_put(&mem->vma[0]);
	}
}

int
nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
{
	struct nouveau_mem *mem = nouveau_mem(reg);
	struct nouveau_cli *cli = mem->cli;

	if (mem->kind && cli->device.info.chipset == 0x50)
		mem->comp = mem->kind = 0;
	if (mem->comp) {
		if (cli->device.info.chipset >= 0xc0)
			mem->kind = gf100_pte_storage_type_map[mem->kind];
		mem->comp = 0;
	}

	mem->__mem.size = (reg->num_pages << PAGE_SHIFT) >> 12;
	mem->__mem.memtype = (mem->comp << 7) | mem->kind;
	if (tt->ttm.sg) mem->__mem.sg    = tt->ttm.sg;
	else            mem->__mem.pages = tt->dma_address;
	mem->_mem = &mem->__mem;
	mem->mem.page = 12;
	return 0;
}

int
nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
{
	struct nouveau_mem *mem = nouveau_mem(reg);
	struct nvkm_ram *ram = nvxx_fb(&mem->cli->device)->ram;
	u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
	int ret;

	mem->mem.page = page;

	ret = ram->func->get(ram, size, 1 << page, contig ? 0 : 1 << page,
			     (mem->comp << 8) | mem->kind, &mem->_mem);
	if (ret)
		return ret;

	reg->start = mem->_mem->offset >> PAGE_SHIFT;
	return ret;
}

void
nouveau_mem_del(struct ttm_mem_reg *reg)
{
	struct nouveau_mem *mem = nouveau_mem(reg);
	nouveau_mem_fini(mem);
	kfree(reg->mm_node);
	reg->mm_node = NULL;
}

int
nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
		struct ttm_mem_reg *reg)
{
	struct nouveau_mem *mem;

	if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
		return -ENOMEM;
	mem->cli = cli;
	mem->kind = kind;
	mem->comp = comp;

	reg->mm_node = mem;
	return 0;
}
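
Taken together, nouveau_mem_new(), nouveau_mem_vram()/nouveau_mem_host() and nouveau_mem_del() form an allocate/back/free lifecycle for reg->mm_node. A hedged sketch of how a TTM memory-manager hook might chain them (the surrounding function is illustrative only; just the nouveau_mem_* calls come from the file above):

	/* Illustrative caller, not part of the commit: allocate the wrapper,
	 * back it with VRAM, and unwind on failure. */
	static int
	example_vram_alloc(struct nouveau_cli *cli, u8 kind, u8 comp, u8 page,
			   bool contig, struct ttm_mem_reg *reg)
	{
		int ret;

		ret = nouveau_mem_new(cli, kind, comp, reg); /* sets reg->mm_node */
		if (ret)
			return ret;

		ret = nouveau_mem_vram(reg, contig, page); /* fills mem->_mem, reg->start */
		if (ret)
			nouveau_mem_del(reg); /* frees the wrapper, clears reg->mm_node */
		return ret;
	}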