drivers/gpu/drm/armada/armada_gem.c (+2 −2)

@@ -231,7 +231,7 @@ struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
 	obj->dev_addr = DMA_ERROR_CODE;

-	mapping = file_inode(obj->obj.filp)->i_mapping;
+	mapping = obj->obj.filp->f_mapping;
 	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

 	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

@@ -441,7 +441,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
 	if (sg_alloc_table(sgt, count, GFP_KERNEL))
 		goto free_sgt;

-	mapping = file_inode(dobj->obj.filp)->i_mapping;
+	mapping = dobj->obj.filp->f_mapping;
 	for_each_sg(sgt->sgl, sg, count, i) {
 		struct page *page;
drivers/gpu/drm/drm_gem.c (+1 −1)

@@ -511,7 +511,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 	int i, npages;

 	/* This is the shared memory object that backs the GEM resource */
-	mapping = file_inode(obj->filp)->i_mapping;
+	mapping = obj->filp->f_mapping;

 	/* We already BUG_ON() for non-page-aligned sizes in
 	 * drm_gem_object_init(), so we should never hit this unless
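Why this is a behaviour-preserving change: when the shmem file backing a GEM object is created, the kernel initialises file->f_mapping from file_inode(file)->i_mapping, so both expressions name the same struct address_space and the new spelling simply avoids the extra inode dereference. A minimal kernel-style sketch of that equivalence (the gem_mapping() helper is hypothetical and not part of this patch):

#include <linux/bug.h>
#include <linux/fs.h>

/*
 * Hypothetical helper: for a shmem-backed GEM file, f_mapping was set
 * from the inode's i_mapping when the file was created, so both
 * lookups below should return the same struct address_space.
 */
static struct address_space *gem_mapping(struct file *filp)
{
	struct address_space *via_inode = file_inode(filp)->i_mapping;	/* old spelling */
	struct address_space *via_file = filp->f_mapping;		/* new spelling */

	WARN_ON(via_inode != via_file);
	return via_file;
}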
drivers/gpu/drm/etnaviv/etnaviv_gem.c (+1 −1)

@@ -660,7 +660,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
-		mapping = file_inode(obj->filp)->i_mapping;
+		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
 	}
drivers/gpu/drm/i915/i915_gem.c (+5 −5)

@@ -151,7 +151,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 static int
 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+	struct address_space *mapping = obj->base.filp->f_mapping;
 	char *vaddr = obj->phys_handle->vaddr;
 	struct sg_table *st;
 	struct scatterlist *sg;

@@ -218,7 +218,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
 		obj->dirty = 0;

 	if (obj->dirty) {
-		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+		struct address_space *mapping = obj->base.filp->f_mapping;
 		char *vaddr = obj->phys_handle->vaddr;
 		int i;

@@ -2155,7 +2155,7 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 	if (obj->base.filp == NULL)
 		return;

-	mapping = file_inode(obj->base.filp)->i_mapping,
+	mapping = obj->base.filp->f_mapping,
 	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
 }

@@ -2271,7 +2271,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
	 *
	 * Fail silently without starting the shrinker
	 */
-	mapping = file_inode(obj->base.filp)->i_mapping;
+	mapping = obj->base.filp->f_mapping;
 	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
 	gfp |= __GFP_NORETRY | __GFP_NOWARN;
 	sg = st->sgl;

@@ -4522,7 +4522,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 		mask |= __GFP_DMA32;
 	}

-	mapping = file_inode(obj->base.filp)->i_mapping;
+	mapping = obj->base.filp->f_mapping;
 	mapping_set_gfp_mask(mapping, mask);

 	i915_gem_object_init(obj, &i915_gem_object_ops);
drivers/gpu/drm/omapdrm/omap_gem.c (+1 −1)

@@ -1406,7 +1406,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 		if (ret)
 			goto err_free;

-		mapping = file_inode(obj->filp)->i_mapping;
+		mapping = obj->filp->f_mapping;
 		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
 	}