drivers/gpu/drm/msm/msm_drv.c  +33 −0

@@ -1537,6 +1537,37 @@ static int msm_ioctl_deregister_event(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static int msm_ioctl_gem_sync(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_msm_gem_sync *arg = data;
+	int i;
+
+	for (i = 0; i < arg->nr_ops; i++) {
+		struct drm_msm_gem_syncop syncop;
+		struct drm_gem_object *obj;
+		int ret;
+		void __user *ptr = (void __user *)(uintptr_t)
+			(arg->ops + (i * sizeof(syncop)));
+
+		ret = copy_from_user(&syncop, ptr, sizeof(syncop));
+		if (ret)
+			return -EFAULT;
+
+		obj = drm_gem_object_lookup(dev, file, syncop.handle);
+		if (!obj)
+			return -ENOENT;
+
+		msm_gem_sync(obj, syncop.op);
+
+		drm_gem_object_unreference_unlocked(obj);
+	}
+
+	return 0;
+}
+
 void msm_send_crtc_notification(struct drm_crtc *crtc,
 		struct drm_event *event, u8 *payload)
 {
@@ -1665,6 +1696,8 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
 			DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_COUNTER_READ, msm_ioctl_counter_read,
 			DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_SYNC, msm_ioctl_gem_sync,
+			DRM_AUTH|DRM_RENDER_ALLOW),
 };
 
 static const struct vm_operations_struct vm_ops = {
drivers/gpu/drm/msm/msm_drv.h  +1 −0

@@ -469,6 +469,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		uint32_t size, struct sg_table *sgt);
+void msm_gem_sync(struct drm_gem_object *obj, u32 op);
 
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace);
drivers/gpu/drm/msm/msm_gem.c  +20 −0

@@ -546,6 +546,26 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
 	return 0;
 }
 
+void msm_gem_sync(struct drm_gem_object *obj, u32 op)
+{
+	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+		return;
+
+	switch (op) {
+	case MSM_GEM_SYNC_TO_CPU:
+		dma_sync_sg_for_cpu(dev->dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+		break;
+	case MSM_GEM_SYNC_TO_DEV:
+		dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+		break;
+	}
+
+}
 #ifdef CONFIG_DEBUG_FS
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
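msm_gem_sync() only performs cache maintenance on cacheable buffers: write-combined and uncached BOs return early, and otherwise the object's sg list is synced in the requested direction. The fragment below is an illustration only, not part of the patch, showing how an in-kernel caller would be expected to bracket CPU access with the two directions; the mapped address, length, and function name are hypothetical, and the fragment assumes the msm_gem.c context for its headers.

/* Illustration only, not part of the patch: expected bracketing of CPU
 * access to a cached BO.  "vaddr" is a hypothetical kernel mapping of the
 * object (e.g. obtained from the driver's vmap path).
 */
static void example_cpu_update(struct drm_gem_object *obj, void *vaddr, size_t len)
{
	/* Pull device-written data into view of the CPU before touching it. */
	msm_gem_sync(obj, MSM_GEM_SYNC_TO_CPU);

	memset(vaddr, 0, len);		/* stand-in for real CPU writes */

	/* Write dirtied cache lines back before the GPU next reads the BO. */
	msm_gem_sync(obj, MSM_GEM_SYNC_TO_DEV);
}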
include/uapi/drm/msm_drm.h  +15 −1

@@ -323,6 +323,18 @@ struct drm_msm_counter_read {
 	__u32 nr_ops;
 };
 
+#define MSM_GEM_SYNC_TO_DEV 0
+#define MSM_GEM_SYNC_TO_CPU 1
+
+struct drm_msm_gem_syncop {
+	__u32 handle;
+	__u32 op;
+};
+
+struct drm_msm_gem_sync {
+	__u32 nr_ops;
+	__u64 __user ops;
+};
+
 #define DRM_MSM_GET_PARAM              0x00
 /* placeholder:
@@ -341,6 +353,7 @@ struct drm_msm_counter_read {
 #define DRM_MSM_COUNTER_GET            0x43
 #define DRM_MSM_COUNTER_PUT            0x44
 #define DRM_MSM_COUNTER_READ           0x45
+#define DRM_MSM_GEM_SYNC               0x46
 
 /**
  * Currently DRM framework supports only VSYNC event.
@@ -370,5 +383,6 @@ struct drm_msm_counter_read {
 #define DRM_IOCTL_MSM_COUNTER_READ \
 	DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_COUNTER_READ, \
 			struct drm_msm_counter_read)
+#define DRM_IOCTL_MSM_GEM_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_MSM_GEM_SYNC,\
+		struct drm_msm_gem_sync)
 #endif /* __MSM_DRM_H__ */
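From userspace, the new ioctl takes a count and a pointer (as a __u64) to an array of drm_msm_gem_syncop entries, which the kernel walks with copy_from_user(). A minimal sketch follows, assuming libdrm's drmIoctl(), GEM handles obtained elsewhere (e.g. via DRM_IOCTL_MSM_GEM_NEW), and an installed copy of this uapi header; the include path, fd, handles, and function name are placeholders rather than anything defined by the patch.

#include <stdint.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>	/* path depends on how the uapi header is installed */

/* Illustration only: flush two cached BOs to the device before a GPU read. */
static int example_sync_to_device(int fd, uint32_t bo_a, uint32_t bo_b)
{
	struct drm_msm_gem_syncop ops[] = {
		{ .handle = bo_a, .op = MSM_GEM_SYNC_TO_DEV },
		{ .handle = bo_b, .op = MSM_GEM_SYNC_TO_DEV },
	};
	struct drm_msm_gem_sync req = {
		.nr_ops = 2,
		.ops = (uintptr_t)ops,	/* kernel copies each entry from this array */
	};

	return drmIoctl(fd, DRM_IOCTL_MSM_GEM_SYNC, &req);
}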