Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 07bbc1c5 authored by Ben Skeggs
Browse files

drm/nouveau/core/memory: split info pointers from accessor pointers



The accessor functions can change as a result of acquire()/release() calls,
and are protected by any refcounting done there.

Other functions must remain constant, as they can be called any time.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent dde59b9c
Loading
Loading
Loading
Loading
+4 −1
Original line number Diff line number Diff line
@@ -9,7 +9,10 @@ struct nvkm_vm;
#define NVOBJ_FLAG_HEAP       0x00000004

struct nvkm_gpuobj {
	union {
		const struct nvkm_gpuobj_func *func;
		const struct nvkm_gpuobj_func *ptrs;
	};
	struct nvkm_gpuobj *parent;
	struct nvkm_memory *memory;
	struct nvkm_mm_node *node;
+7 −3
Original line number Diff line number Diff line
@@ -14,6 +14,7 @@ enum nvkm_memory_target {

struct nvkm_memory {
	const struct nvkm_memory_func *func;
	const struct nvkm_memory_ptrs *ptrs;
};

struct nvkm_memory_func {
@@ -24,9 +25,12 @@ struct nvkm_memory_func {
	void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
	void __iomem *(*acquire)(struct nvkm_memory *);
	void (*release)(struct nvkm_memory *);
	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
};

struct nvkm_memory_ptrs {
	u32 (*rd32)(struct nvkm_memory *, u64 offset);
	void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
};

void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
@@ -43,8 +47,8 @@ void nvkm_memory_del(struct nvkm_memory **);
 * macros to guarantee correct behaviour across all chipsets
 */
#define nvkm_kmap(o)     (o)->func->acquire(o)
#define nvkm_ro32(o,a)   (o)->func->rd32((o), (a))
#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
#define nvkm_ro32(o,a)   (o)->ptrs->rd32((o), (a))
#define nvkm_wo32(o,a,d) (o)->ptrs->wr32((o), (a), (d))
#define nvkm_mo32(o,a,m,d) ({                                                  \
	u32 _addr = (a), _data = nvkm_ro32((o), _addr);                        \
	nvkm_wo32((o), _addr, (_data & ~(m)) | (d));                           \
+14 −3
Original line number Diff line number Diff line
@@ -112,9 +112,13 @@ nvkm_instobj_func = {
	.size = nvkm_instobj_size,
	.acquire = nvkm_instobj_acquire,
	.release = nvkm_instobj_release,
	.map = nvkm_instobj_map,
};

static const struct nvkm_memory_ptrs
nvkm_instobj_ptrs = {
	.rd32 = nvkm_instobj_rd32,
	.wr32 = nvkm_instobj_wr32,
	.map = nvkm_instobj_map,
};

static void
@@ -137,8 +141,10 @@ nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
{
	struct nvkm_instobj *iobj = nvkm_instobj(memory);
	iobj->map = nvkm_kmap(iobj->parent);
	if (iobj->map)
	if (iobj->map) {
		memory->func = &nvkm_instobj_func;
		memory->ptrs = &nvkm_instobj_ptrs;
	}
	return iobj->map;
}

@@ -165,9 +171,13 @@ nvkm_instobj_func_slow = {
	.boot = nvkm_instobj_boot,
	.acquire = nvkm_instobj_acquire_slow,
	.release = nvkm_instobj_release_slow,
	.map = nvkm_instobj_map,
};

static const struct nvkm_memory_ptrs
nvkm_instobj_ptrs_slow = {
	.rd32 = nvkm_instobj_rd32_slow,
	.wr32 = nvkm_instobj_wr32_slow,
	.map = nvkm_instobj_map,
};

int
@@ -196,6 +206,7 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
		}

		nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
		iobj->memory.ptrs = &nvkm_instobj_ptrs_slow;
		iobj->parent = memory;
		iobj->imem = imem;
		spin_lock(&iobj->imem->lock);
+7 −3
Original line number Diff line number Diff line
@@ -346,8 +346,6 @@ gk20a_instobj_func_dma = {
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_dma,
	.release = gk20a_instobj_release_dma,
	.rd32 = gk20a_instobj_rd32,
	.wr32 = gk20a_instobj_wr32,
	.map = gk20a_instobj_map,
};

@@ -359,9 +357,13 @@ gk20a_instobj_func_iommu = {
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_iommu,
	.release = gk20a_instobj_release_iommu,
	.map = gk20a_instobj_map,
};

static const struct nvkm_memory_ptrs
gk20a_instobj_ptrs = {
	.rd32 = gk20a_instobj_rd32,
	.wr32 = gk20a_instobj_wr32,
	.map = gk20a_instobj_map,
};

static int
@@ -377,6 +379,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
	*_node = &node->base;

	nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
	node->base.memory.ptrs = &gk20a_instobj_ptrs;

	node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
					   &node->handle, GFP_KERNEL,
@@ -424,6 +427,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
	node->dma_addrs = (void *)(node->pages + npages);

	nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
	node->base.memory.ptrs = &gk20a_instobj_ptrs;

	/* Allocate backing memory */
	for (i = 0; i < npages; i++) {
+28 −23
Original line number Diff line number Diff line
@@ -43,22 +43,31 @@ struct nv04_instobj {
	struct nvkm_mm_node *node;
};

static enum nvkm_memory_target
nv04_instobj_target(struct nvkm_memory *memory)
static void
nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	return NVKM_MEM_TARGET_INST;
	struct nv04_instobj *iobj = nv04_instobj(memory);
	struct nvkm_device *device = iobj->imem->base.subdev.device;
	nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
}

static u64
nv04_instobj_addr(struct nvkm_memory *memory)
static u32
nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
	return nv04_instobj(memory)->node->offset;
	struct nv04_instobj *iobj = nv04_instobj(memory);
	struct nvkm_device *device = iobj->imem->base.subdev.device;
	return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
}

static u64
nv04_instobj_size(struct nvkm_memory *memory)
static const struct nvkm_memory_ptrs
nv04_instobj_ptrs = {
	.rd32 = nv04_instobj_rd32,
	.wr32 = nv04_instobj_wr32,
};

static void
nv04_instobj_release(struct nvkm_memory *memory)
{
	return nv04_instobj(memory)->node->length;
}

static void __iomem *
@@ -69,25 +78,22 @@ nv04_instobj_acquire(struct nvkm_memory *memory)
	return device->pri + 0x700000 + iobj->node->offset;
}

static void
nv04_instobj_release(struct nvkm_memory *memory)
static u64
nv04_instobj_size(struct nvkm_memory *memory)
{
	return nv04_instobj(memory)->node->length;
}

static u32
nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
static u64
nv04_instobj_addr(struct nvkm_memory *memory)
{
	struct nv04_instobj *iobj = nv04_instobj(memory);
	struct nvkm_device *device = iobj->imem->base.subdev.device;
	return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
	return nv04_instobj(memory)->node->offset;
}

static void
nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
static enum nvkm_memory_target
nv04_instobj_target(struct nvkm_memory *memory)
{
	struct nv04_instobj *iobj = nv04_instobj(memory);
	struct nvkm_device *device = iobj->imem->base.subdev.device;
	nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
	return NVKM_MEM_TARGET_INST;
}

static void *
@@ -108,8 +114,6 @@ nv04_instobj_func = {
	.addr = nv04_instobj_addr,
	.acquire = nv04_instobj_acquire,
	.release = nv04_instobj_release,
	.rd32 = nv04_instobj_rd32,
	.wr32 = nv04_instobj_wr32,
};

static int
@@ -125,6 +129,7 @@ nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
	*pmemory = &iobj->memory;

	nvkm_memory_ctor(&nv04_instobj_func, &iobj->memory);
	iobj->memory.ptrs = &nv04_instobj_ptrs;
	iobj->imem = imem;

	mutex_lock(&imem->base.subdev.mutex);
Loading