drivers/gpu/drm/nouveau/nouveau_drv.h  +1 −1

@@ -518,7 +518,7 @@ struct nouveau_pm_engine {
 };
 
 struct nouveau_vram_engine {
-	struct nouveau_mm *mm;
+	struct nouveau_mm mm;
 
 	int (*init)(struct drm_device *);
 	void (*takedown)(struct drm_device *dev);

drivers/gpu/drm/nouveau/nouveau_mm.c  +25 −35

@@ -27,7 +27,7 @@
 #include "nouveau_mm.h"
 
 static inline void
-region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
+region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a)
 {
 	list_del(&a->nl_entry);
 	list_del(&a->fl_entry);
@@ -35,7 +35,7 @@ region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
 }
 
 static struct nouveau_mm_node *
-region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
+region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
 {
 	struct nouveau_mm_node *b;
 
@@ -57,33 +57,33 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
 	return b;
 }
 
-#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
+#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
 	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
 
 void
-nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this)
 {
 	struct nouveau_mm_node *prev = node(this, prev);
 	struct nouveau_mm_node *next = node(this, next);
 
-	list_add(&this->fl_entry, &rmm->free);
+	list_add(&this->fl_entry, &mm->free);
 	this->type = 0;
 
 	if (prev && prev->type == 0) {
 		prev->length += this->length;
-		region_put(rmm, this);
+		region_put(mm, this);
 		this = prev;
 	}
 
 	if (next && next->type == 0) {
 		next->offset  = this->offset;
 		next->length += this->length;
-		region_put(rmm, this);
+		region_put(mm, this);
 	}
 }
 
 int
-nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
+nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
 	       u32 align, struct nouveau_mm_node **pnode)
 {
 	struct nouveau_mm_node *prev, *this, *next;
@@ -92,17 +92,17 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 	u32 splitoff;
 	u32 s, e;
 
-	list_for_each_entry(this, &rmm->free, fl_entry) {
+	list_for_each_entry(this, &mm->free, fl_entry) {
 		e = this->offset + this->length;
 		s = this->offset;
 
 		prev = node(this, prev);
 		if (prev && prev->type != type)
-			s = roundup(s, rmm->block_size);
+			s = roundup(s, mm->block_size);
 
 		next = node(this, next);
 		if (next && next->type != type)
-			e = rounddown(e, rmm->block_size);
+			e = rounddown(e, mm->block_size);
 
 		s  = (s + align_mask) & ~align_mask;
 		e &= ~align_mask;
@@ -110,10 +110,10 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 			continue;
 
 		splitoff = s - this->offset;
-		if (splitoff && !region_split(rmm, this, splitoff))
+		if (splitoff && !region_split(mm, this, splitoff))
 			return -ENOMEM;
 
-		this = region_split(rmm, this, min(size, e - s));
+		this = region_split(mm, this, min(size, e - s));
 		if (!this)
 			return -ENOMEM;
 
@@ -127,9 +127,8 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 }
 
 int
-nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
+nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
 {
-	struct nouveau_mm *rmm;
 	struct nouveau_mm_node *heap;
 
 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
@@ -138,32 +137,25 @@ nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
 	heap->offset = roundup(offset, block);
 	heap->length = rounddown(offset + length, block) - heap->offset;
 
-	rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
-	if (!rmm) {
-		kfree(heap);
-		return -ENOMEM;
-	}
-	rmm->block_size = block;
-	mutex_init(&rmm->mutex);
-	INIT_LIST_HEAD(&rmm->nodes);
-	INIT_LIST_HEAD(&rmm->free);
-	list_add(&heap->nl_entry, &rmm->nodes);
-	list_add(&heap->fl_entry, &rmm->free);
-
-	*prmm = rmm;
+	mutex_init(&mm->mutex);
+	mm->block_size = block;
+	INIT_LIST_HEAD(&mm->nodes);
+	INIT_LIST_HEAD(&mm->free);
+
+	list_add(&heap->nl_entry, &mm->nodes);
+	list_add(&heap->fl_entry, &mm->free);
 	return 0;
 }
 
 int
-nouveau_mm_fini(struct nouveau_mm **prmm)
+nouveau_mm_fini(struct nouveau_mm *mm)
 {
-	struct nouveau_mm *rmm = *prmm;
 	struct nouveau_mm_node *node, *heap =
-		list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
+		list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
 
-	if (!list_is_singular(&rmm->nodes)) {
+	if (!list_is_singular(&mm->nodes)) {
 		printk(KERN_ERR "nouveau_mm not empty at destroy time!\n");
-		list_for_each_entry(node, &rmm->nodes, nl_entry) {
+		list_for_each_entry(node, &mm->nodes, nl_entry) {
 			printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
 			       node->type, node->offset, node->length);
 		}
@@ -172,7 +164,5 @@ nouveau_mm_fini(struct nouveau_mm **prmm)
 	}
 
 	kfree(heap);
-	kfree(rmm);
-	*prmm = NULL;
 	return 0;
 }

drivers/gpu/drm/nouveau/nouveau_mm.h  +2 −2

@@ -44,8 +44,8 @@ struct nouveau_mm {
 	u32 block_size;
 };
 
-int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
-int nouveau_mm_fini(struct nouveau_mm **);
+int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
+int nouveau_mm_fini(struct nouveau_mm *);
 int nouveau_mm_pre(struct nouveau_mm *);
 int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
 		   u32 align, struct nouveau_mm_node **);
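The functional change in nouveau_mm.c/nouveau_mm.h is that nouveau_mm_init() no longer allocates the struct nouveau_mm itself and nouveau_mm_fini() no longer frees it; both now work on caller-owned, embedded storage such as the new nouveau_vram_engine.mm member. The sketch below is not part of this patch and only illustrates the new by-reference calling convention; the function names and the offset/length/block values are hypothetical.

/* Hypothetical caller, sketching the by-reference API after this patch.
 * Only nouveau_mm_init()/nouveau_mm_fini() and the embedded
 * nouveau_vram_engine.mm member come from the diff; everything else
 * (names, parameter values) is illustrative. */
static int
example_vram_mm_setup(struct nouveau_vram_engine *vram,
		      u32 offset, u32 length, u32 block)
{
	/* Fills the embedded, caller-owned struct in place; there is no
	 * internal kzalloc() of a struct nouveau_mm any more. */
	return nouveau_mm_init(&vram->mm, offset, length, block);
}

static void
example_vram_mm_teardown(struct nouveau_vram_engine *vram)
{
	/* Releases only the heap node; the old kfree(rmm) / "*prmm = NULL"
	 * step is gone because the struct is embedded in the engine. */
	nouveau_mm_fini(&vram->mm);
}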
drivers/gpu/drm/nouveau/nouveau_vm.c  +19 −19

@@ -172,9 +172,9 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
 		}
 
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&vm->mm->mutex);
+		mutex_lock(&vm->mm.mutex);
 	}
 }
 
@@ -191,18 +191,18 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
 	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
 	pgt_size *= 8;
 
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
 				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	if (unlikely(ret))
 		return ret;
 
 	/* someone beat us to filling the PDE while we didn't have the lock */
 	if (unlikely(vpgt->refcount[big]++)) {
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&vm->mm->mutex);
+		mutex_lock(&vm->mm.mutex);
 		return 0;
 	}
 
@@ -223,10 +223,10 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 	u32 fpde, lpde, pde;
 	int ret;
 
-	mutex_lock(&vm->mm->mutex);
-	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
+	mutex_lock(&vm->mm.mutex);
+	ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
 	if (unlikely(ret != 0)) {
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		return ret;
 	}
 
@@ -245,13 +245,13 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 		if (ret) {
 			if (pde != fpde)
 				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
-			nouveau_mm_put(vm->mm, vma->node);
-			mutex_unlock(&vm->mm->mutex);
+			nouveau_mm_put(&vm->mm, vma->node);
+			mutex_unlock(&vm->mm.mutex);
 			vma->node = NULL;
 			return ret;
 		}
 	}
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 
 	vma->vm     = vm;
 	vma->offset = (u64)vma->node->offset << 12;
@@ -270,11 +270,11 @@ nouveau_vm_put(struct nouveau_vma *vma)
 	fpde = (vma->node->offset >> vm->pgt_bits);
 	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
-	nouveau_mm_put(vm->mm, vma->node);
+	nouveau_mm_put(&vm->mm, vma->node);
 	vma->node = NULL;
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 }
 
 int
@@ -360,11 +360,11 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
 
 	nouveau_gpuobj_ref(pgd, &vpgd->obj);
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
 		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
 	list_add(&vpgd->head, &vm->pgd_list);
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 	return 0;
 }
 
@@ -377,7 +377,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
 	if (!mpgd)
 		return;
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
 		if (vpgd->obj == mpgd) {
 			pgd = vpgd->obj;
@@ -386,7 +386,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
 			break;
 		}
 	}
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 
 	nouveau_gpuobj_ref(NULL, &pgd);
 }

drivers/gpu/drm/nouveau/nouveau_vm.h  +1 −1

@@ -51,7 +51,7 @@ struct nouveau_vma {
 
 struct nouveau_vm {
 	struct drm_device *dev;
-	struct nouveau_mm *mm;
+	struct nouveau_mm mm;
 	int refcount;
 
 	struct list_head pgd_list;
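For nouveau_vm the same embedding means callers take the address of the member: they lock &vm->mm.mutex and pass &vm->mm to the allocator instead of dereferencing a pointer. The wrapper below is a hypothetical sketch mirroring the locking pattern nouveau_vm_get() uses in the hunks above; the wrapper name and its parameters are illustrative, not part of this patch.

/* Hypothetical wrapper, illustrating only the embedded-mm calling pattern
 * (lock &vm->mm.mutex, pass &vm->mm) established by this patch. */
static int
example_vm_alloc(struct nouveau_vm *vm, u32 page_shift, u32 msize, u32 align,
		 struct nouveau_mm_node **pnode)
{
	int ret;

	mutex_lock(&vm->mm.mutex);	/* was mutex_lock(&vm->mm->mutex) */
	ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, pnode);
	mutex_unlock(&vm->mm.mutex);	/* was vm->mm, a pointer member */
	return ret;
}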