drivers/gpu/drm/msm/msm_drv.h (+2 −0)

@@ -153,6 +153,7 @@ struct msm_drm_private {
 		struct drm_mm mm;
 	} vram;
 
+	struct notifier_block vmap_notifier;
 	struct shrinker shrinker;
 
 	struct msm_vblank_ctrl vblank_ctrl;
@@ -206,6 +207,7 @@
 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
 void msm_gem_put_vaddr(struct drm_gem_object *obj);
 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
 void msm_gem_purge(struct drm_gem_object *obj);
+void msm_gem_vunmap(struct drm_gem_object *obj);
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
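Two points worth noting about this small header change (the observations are mine, not from the patch itself): the notifier_block is embedded by value in msm_drm_private rather than allocated separately, so the callback can recover the private struct with no extra state, exactly as the shrinker code further down does:

	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);

And msm_gem_vunmap() is declared here because the unmapping logic lives in msm_gem.c but must now also be callable from msm_gem_shrinker.c.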
drivers/gpu/drm/msm/msm_gem.c (+20 −5)

@@ -421,6 +421,7 @@ void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
 		if (msm_obj->vaddr == NULL)
 			return ERR_PTR(-ENOMEM);
 	}
+	msm_obj->vmap_count++;
 	return msm_obj->vaddr;
 }
 
@@ -435,13 +436,17 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
 {
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
-	/* no-op for now */
+	WARN_ON(msm_obj->vmap_count < 1);
+	msm_obj->vmap_count--;
 }
 
 void msm_gem_put_vaddr(struct drm_gem_object *obj)
 {
-	/* no-op for now */
+	mutex_lock(&obj->dev->struct_mutex);
+	msm_gem_put_vaddr_locked(obj);
+	mutex_unlock(&obj->dev->struct_mutex);
 }
 
 /* Update madvise status, returns true if not purged, else
@@ -470,8 +475,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
 	put_iova(obj);
 
-	vunmap(msm_obj->vaddr);
-	msm_obj->vaddr = NULL;
+	msm_gem_vunmap(obj);
 
 	put_pages(obj);
@@ -491,6 +495,17 @@ void msm_gem_purge(struct drm_gem_object *obj)
 			0, (loff_t)-1);
 }
 
+void msm_gem_vunmap(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
+		return;
+
+	vunmap(msm_obj->vaddr);
+	msm_obj->vaddr = NULL;
+}
+
 /* must be called before _move_to_active().. */
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive)
@@ -694,7 +709,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 		drm_prime_gem_destroy(obj, msm_obj->sgt);
 	} else {
-		vunmap(msm_obj->vaddr);
+		msm_gem_vunmap(obj);
 		put_pages(obj);
 	}
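The refcounting contract is easiest to see from a caller's perspective. Here is a minimal, hypothetical usage sketch (copy_to_bo and its arguments are illustrative, not part of the patch): msm_gem_get_vaddr() bumps vmap_count, and the matching msm_gem_put_vaddr() drops it again. Note that put does not tear the mapping down; it only marks it idle, leaving the actual vunmap() to msm_gem_vunmap() on the purge or free paths.

	/* Hypothetical caller, sketching the pairing contract assumed
	 * by this patch.  msm_gem_put_vaddr() leaves the mapping in
	 * place; it only drops vmap_count so the vmap purge notifier
	 * may reclaim it later.
	 */
	static int copy_to_bo(struct drm_gem_object *obj,
			const void *data, size_t len)
	{
		void *vaddr = msm_gem_get_vaddr(obj);	/* vmap_count++ */

		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		memcpy(vaddr, data, len);

		msm_gem_put_vaddr(obj);			/* vmap_count-- */
		return 0;
	}

An unbalanced get (a missing put on an error path, say) would leave vmap_count nonzero forever, and the object would never become eligible for reclaim via is_vunmapable().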
drivers/gpu/drm/msm/msm_gem.h (+10 −0)

@@ -34,6 +34,11 @@ struct msm_gem_object {
 	 */
 	uint8_t madv;
 
+	/**
+	 * count of active vmap'ing
+	 */
+	uint8_t vmap_count;
+
 	/* And object is either:
 	 *  inactive - on priv->inactive_list
 	 *  active   - on one one of the gpu's active_list..  well, at
@@ -83,6 +88,11 @@
 		!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
 }
 
+static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
+{
+	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
+}
+
 #define MAX_CMDS 4
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
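Read together with vaddr, the new counter gives each object three vmap states (this summary is inferred from the predicate, not spelled out in the patch):

	vaddr == NULL                    never vmap'd, nothing to reclaim
	vaddr != NULL, vmap_count > 0    mapped and in active use, must stay
	vaddr != NULL, vmap_count == 0   mapped but idle: is_vunmapable()

Only the last state lets msm_gem_vunmap() proceed, which is exactly what its WARN_ON(!is_vunmapable(msm_obj)) check enforces.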
drivers/gpu/drm/msm/msm_gem_shrinker.c (+40 −0)

@@ -100,6 +100,42 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	return freed;
 }
 
+static int
+msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+	struct msm_drm_private *priv =
+		container_of(nb, struct msm_drm_private, vmap_notifier);
+	struct drm_device *dev = priv->dev;
+	struct msm_gem_object *msm_obj;
+	unsigned unmapped = 0;
+	bool unlock;
+
+	if (!msm_gem_shrinker_lock(dev, &unlock))
+		return NOTIFY_DONE;
+
+	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+		if (is_vunmapable(msm_obj)) {
+			msm_gem_vunmap(&msm_obj->base);
+			/* since we don't know any better, lets bail after a few
+			 * and if necessary the shrinker will be invoked again.
+			 * Seems better than unmapping *everything*
+			 */
+			if (++unmapped >= 15)
+				break;
+		}
+	}
+
+	if (unlock)
+		mutex_unlock(&dev->struct_mutex);
+
+	*(unsigned long *)ptr += unmapped;
+
+	if (unmapped > 0)
+		pr_info_ratelimited("Purging %u vmaps\n", unmapped);
+
+	return NOTIFY_DONE;
+}
+
 /**
  * msm_gem_shrinker_init - Initialize msm shrinker
  * @dev_priv: msm device
@@ -113,6 +149,9 @@ void msm_gem_shrinker_init(struct drm_device *dev)
 	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
 	priv->shrinker.seeks = DEFAULT_SEEKS;
 	WARN_ON(register_shrinker(&priv->shrinker));
+
+	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
+	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
 }
 
 /**
@@ -124,5 +163,6 @@
 void msm_gem_shrinker_cleanup(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
 	unregister_shrinker(&priv->shrinker);
 }
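For context on when this callback actually fires (background on the mm side, paraphrased rather than quoted, so treat it as an approximate sketch): the vmap purge notifier chain is invoked by mm/vmalloc.c when it cannot find free vmap address space even after purging lazily-freed areas. The unsigned long behind ptr is how callbacks report progress, and any nonzero total makes the allocator retry, roughly along these lines:

	/* Approximate sketch of the caller's side in mm/vmalloc.c */
	unsigned long freed = 0;

	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
	if (freed > 0)
		goto retry;	/* a driver released vmap space, try again */

That is why msm_gem_shrinker_vmap() accumulates into *(unsigned long *)ptr, and why bailing out after 15 objects is safe: partial progress is enough to unblock the allocation, and the notifier simply fires again if more space is needed. Note also that cleanup unregisters the notifier before the shrinker, tearing down in the reverse of init order.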