drivers/media/platform/msm/synx/synx.c (+70 −65)

@@ -29,9 +29,9 @@ void synx_external_callback(s32 sync_obj, int status, void *data)
 	}
 	if (row) {
-		spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_lock(&synx_dev->row_locks[row->index]);
 		row->signaling_id = sync_obj;
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		pr_debug("signaling synx 0x%x from external callback %d\n",
 			synx_obj, sync_obj);

@@ -138,23 +138,23 @@ int synx_register_callback(s32 synx_obj,
 	if (!row || !cb_func)
 		return -EINVAL;
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
-	state = synx_status_locked(row);
+	state = synx_status(row);
 	/* do not register if callback registered earlier */
 	list_for_each_entry(temp_cb_info, &row->callback_list, list) {
 		if (temp_cb_info->callback_func == cb_func &&
 			temp_cb_info->cb_data == userdata) {
 			pr_err("duplicate registration for synx 0x%x\n",
 				synx_obj);
-			spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+			mutex_unlock(&synx_dev->row_locks[row->index]);
 			return -EALREADY;
 		}
 	}
-	synx_cb = kzalloc(sizeof(*synx_cb), GFP_ATOMIC);
+	synx_cb = kzalloc(sizeof(*synx_cb), GFP_KERNEL);
 	if (!synx_cb) {
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		return -ENOMEM;
 	}

@@ -171,12 +171,12 @@ int synx_register_callback(s32 synx_obj,
 			synx_cb->synx_obj);
 		queue_work(synx_dev->work_queue,
 			&synx_cb->cb_dispatch_work);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		return 0;
 	}
 	list_add_tail(&synx_cb->list, &row->callback_list);
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 	return 0;
 }

@@ -196,9 +196,9 @@ int synx_deregister_callback(s32 synx_obj,
 		return -EINVAL;
 	}
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
-	state = synx_status_locked(row);
+	state = synx_status(row);
 	pr_debug("de-registering callback for synx 0x%x\n", synx_obj);
 	list_for_each_entry_safe(synx_cb, temp, &row->callback_list, list) {

@@ -216,7 +216,7 @@ int synx_deregister_callback(s32 synx_obj,
 		}
 	}
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 	return 0;
 }

@@ -250,17 +250,17 @@ int synx_signal_core(struct synx_table_row *row, u32 status)
 		return -EINVAL;
 	}
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
 	if (!row->index) {
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		pr_err("object already cleaned up at %d\n",
 			row->index);
 		return -EINVAL;
 	}
-	if (synx_status_locked(row) != SYNX_STATE_ACTIVE) {
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	if (synx_status(row) != SYNX_STATE_ACTIVE) {
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		pr_err("object already signaled synx at %d\n",
 			row->index);
 		return -EALREADY;

@@ -270,7 +270,7 @@ int synx_signal_core(struct synx_table_row *row, u32 status)
 	if (status == SYNX_STATE_SIGNALED_ERROR)
 		dma_fence_set_error(row->fence, -EINVAL);
-	rc = dma_fence_signal_locked(row->fence);
+	rc = dma_fence_signal(row->fence);
 	if (rc < 0) {
 		pr_err("unable to signal synx at %d, err: %d\n",
 			row->index, rc);

@@ -308,7 +308,7 @@ int synx_signal_core(struct synx_table_row *row, u32 status)
 		}
 		row->num_bound_synxs = 0;
 	}
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 	for (i = 0; i < idx; i++) {
 		sync_id = bind_descs[i].external_desc.id[0];

@@ -450,11 +450,11 @@ static int synx_release_core(struct synx_table_row *row)
 	 * (definitely for merged synx on invoing deinit)
 	 * be carefull while accessing the metadata
 	 */
+	mutex_lock(&synx_dev->row_locks[row->index]);
 	fence = row->fence;
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
 	idx = row->index;
 	if (!idx) {
-		spin_unlock_bh(&synx_dev->row_spinlocks[idx]);
+		mutex_unlock(&synx_dev->row_locks[idx]);
 		pr_err("object already cleaned up at %d\n", idx);
 		return -EINVAL;
 	}

@@ -468,7 +468,7 @@ static int synx_release_core(struct synx_table_row *row)
 	/* do not reference fence and row in the function after this */
 	dma_fence_put(fence);
-	spin_unlock_bh(&synx_dev->row_spinlocks[idx]);
+	mutex_unlock(&synx_dev->row_locks[idx]);
 	pr_debug("Exit %s\n", __func__);
 	return 0;

@@ -502,14 +502,14 @@ int synx_wait(s32 synx_obj, u64 timeout_ms)
 		return -EINVAL;
 	}
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
 	if (!row->index) {
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		pr_err("object already cleaned up at %d\n",
 			row->index);
 		return -EINVAL;
 	}
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 	timeleft = dma_fence_wait_timeout(row->fence, (bool) 0,
 		msecs_to_jiffies(timeout_ms));

@@ -560,11 +560,11 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync)
 	if (!data)
 		return -ENOMEM;
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
-	if (synx_status_locked(row) != SYNX_STATE_ACTIVE) {
+	mutex_lock(&synx_dev->row_locks[row->index]);
+	if (synx_status(row) != SYNX_STATE_ACTIVE) {
 		pr_err("bind to non-active synx is prohibited 0x%x\n",
 			synx_obj);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		kfree(data);
 		return -EINVAL;
 	}

@@ -572,7 +572,7 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync)
 	if (row->num_bound_synxs >= SYNX_MAX_NUM_BINDINGS) {
 		pr_err("max number of bindings reached for synx_objs 0x%x\n",
 			synx_obj);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		kfree(data);
 		return -ENOMEM;
 	}

@@ -583,7 +583,7 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync)
 			row->bound_synxs[i].external_desc.id[0]) {
 			pr_err("duplicate binding for external sync %d\n",
 				external_sync.id[0]);
-			spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+			mutex_unlock(&synx_dev->row_locks[row->index]);
 			kfree(data);
 			return -EALREADY;
 		}

@@ -598,7 +598,7 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync)
 	if (rc < 0) {
 		pr_err("callback registration failed for %d\n",
 			external_sync.id[0]);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		kfree(data);
 		return rc;
 	}

@@ -607,7 +607,7 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync)
 		&external_sync, sizeof(struct synx_external_desc));
 	row->bound_synxs[row->num_bound_synxs].external_data = data;
 	row->num_bound_synxs = row->num_bound_synxs + 1;
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 	pr_debug("added external sync %d to bindings of 0x%x\n",
 		external_sync.id[0], synx_obj);
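Taken together, the synx.c hunks above converge on one per-row locking pattern. The sketch below is illustrative only (synx_row_op_sketch is a made-up helper, not driver code, and it assumes the synx_device/synx_table_row definitions from synx_private.h): take the row mutex, re-check row->index to catch a row torn down concurrently, do the work, drop the mutex. Because the critical section may now sleep, the kzalloc in synx_register_callback can move from GFP_ATOMIC to GFP_KERNEL.

/* Illustrative sketch only; synx_row_op_sketch is not part of the driver. */
static int synx_row_op_sketch(struct synx_device *dev,
	struct synx_table_row *row)
{
	struct synx_callback_info *cb;

	mutex_lock(&dev->row_locks[row->index]);

	/* a zero index means the row was torn down before we got the lock */
	if (!row->index) {
		mutex_unlock(&dev->row_locks[row->index]);
		return -EINVAL;
	}

	/* sleeping allocations are fine under a mutex, hence GFP_KERNEL */
	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		mutex_unlock(&dev->row_locks[row->index]);
		return -ENOMEM;
	}

	list_add_tail(&cb->list, &row->callback_list);
	mutex_unlock(&dev->row_locks[row->index]);
	return 0;
}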
@@ -647,10 +647,10 @@ int synx_addrefcount(s32 synx_obj, s32 count)
 		return -EINVAL;
 	}
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
 	while (count--)
 		dma_fence_get(row->fence);
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 	return 0;
 }

@@ -661,6 +661,7 @@ int synx_import(s32 synx_obj, u32 import_key, s32 *new_synx_obj)
 	struct dma_fence *fence;
 	struct synx_obj_node *obj_node;
 	struct synx_table_row *row = NULL;
+	u32 index;
 	pr_debug("Enter %s\n", __func__);

@@ -675,31 +676,35 @@ int synx_import(s32 synx_obj, u32 import_key, s32 *new_synx_obj)
 	if (!obj_node)
 		return -ENOMEM;
+	mutex_lock(&synx_dev->row_locks[row->index]);
+	if (!row->index) {
+		mutex_unlock(&synx_dev->row_locks[row->index]);
+		pr_err("object already cleaned up at %d\n",
+			row->index);
+		kfree(obj_node);
+		return -EINVAL;
+	}
 	/* new global synx id */
 	id = synx_create_handle(row);
 	if (id < 0) {
 		fence = row->fence;
+		index = row->index;
 		if (is_merged_synx(row)) {
-			clear_bit(row->index, synx_dev->bitmap);
-			memset(row, 0, sizeof(*row));
+			clear_bit(index, synx_dev->bitmap);
+			mutex_unlock(&synx_dev->row_locks[index]);
 		}
 		/* release the reference obtained during export */
 		dma_fence_put(fence);
 		kfree(obj_node);
 		pr_err("error creating handle for import\n");
 		return -EINVAL;
 	}
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
-	if (!row->index) {
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
-		pr_err("object already cleaned up at %d\n",
-			row->index);
-		kfree(obj_node);
-		return -EINVAL;
-	}
 	obj_node->synx_obj = id;
 	list_add(&obj_node->list, &row->synx_obj_list);
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 	*new_synx_obj = id;
 	pr_debug("Exit %s\n", __func__);

@@ -722,7 +727,7 @@ int synx_export(s32 synx_obj, u32 *import_key)
 	if (rc < 0)
 		return rc;
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
 	/*
 	 * to make sure the synx is not lost if the process dies or
 	 * synx is released before any other process gets a chance to

@@ -731,7 +736,7 @@ int synx_export(s32 synx_obj, u32 *import_key)
 	 * be a dangling reference and needs to be garbage collected.
 	 */
 	dma_fence_get(row->fence);
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 	pr_debug("Exit %s\n", __func__);
 	return 0;

@@ -960,16 +965,16 @@ static int synx_handle_register_user_payload(
 		userpayload_info.payload,
 		SYNX_PAYLOAD_WORDS * sizeof(__u64));
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
-	state = synx_status_locked(row);
+	state = synx_status(row);
 	if (state == SYNX_STATE_SIGNALED_SUCCESS ||
 		state == SYNX_STATE_SIGNALED_ERROR) {
 		user_payload_kernel->data.status = state;
-		spin_lock_bh(&client->eventq_lock);
+		mutex_lock(&client->eventq_lock);
 		list_add_tail(&user_payload_kernel->list, &client->eventq);
-		spin_unlock_bh(&client->eventq_lock);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&client->eventq_lock);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 		wake_up_all(&client->wq);
 		return 0;
 	}

@@ -982,14 +987,14 @@ static int synx_handle_register_user_payload(
 			user_payload_kernel->data.payload_data[1]) {
 			pr_err("callback already registered on 0x%x\n",
 				synx_obj);
-			spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+			mutex_unlock(&synx_dev->row_locks[row->index]);
 			kfree(user_payload_kernel);
 			return -EALREADY;
 		}
 	}
 	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 	pr_debug("Exit %s\n", __func__);
 	return 0;

@@ -1028,7 +1033,7 @@ static int synx_handle_deregister_user_payload(
 		return -EINVAL;
 	}
-	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_lock(&synx_dev->row_locks[row->index]);
 	state = synx_status_locked(row);
 	list_for_each_entry_safe(user_payload_kernel, temp,

@@ -1044,7 +1049,7 @@ static int synx_handle_deregister_user_payload(
 		}
 	}
-	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	mutex_unlock(&synx_dev->row_locks[row->index]);
 	if (match_found)
 		kfree(user_payload_kernel);

@@ -1066,9 +1071,9 @@ static int synx_handle_deregister_user_payload(
 		data->synx_obj = synx_obj;
 		data->status = SYNX_CALLBACK_RESULT_CANCELED;
-		spin_lock_bh(&client->eventq_lock);
+		mutex_lock(&client->eventq_lock);
 		list_add_tail(&user_payload_kernel->list, &client->eventq);
-		spin_unlock_bh(&client->eventq_lock);
+		mutex_unlock(&client->eventq_lock);
 		pr_debug("registered cancellation callback\n");
 		wake_up_all(&client->wq);
 	}

@@ -1236,17 +1241,17 @@ static ssize_t synx_read(struct file *filep,
 		return -EINVAL;
 	}
-	spin_lock_bh(&client->eventq_lock);
+	mutex_lock(&client->eventq_lock);
 	user_payload_kernel = list_first_entry_or_null(
 		&client->eventq,
 		struct synx_cb_data, list);
 	if (!user_payload_kernel) {
-		spin_unlock_bh(&client->eventq_lock);
+		mutex_unlock(&client->eventq_lock);
 		return 0;
 	}
 	list_del_init(&user_payload_kernel->list);
-	spin_unlock_bh(&client->eventq_lock);
+	mutex_unlock(&client->eventq_lock);
 	rc = size;
 	if (copy_to_user(buf,

@@ -1272,11 +1277,11 @@ static unsigned int synx_poll(struct file *filep,
 	client = filep->private_data;
 	poll_wait(filep, &client->wq, poll_table);
-	spin_lock_bh(&client->eventq_lock);
+	mutex_lock(&client->eventq_lock);
 	/* if list has pending cb events, notify */
 	if (!list_empty(&client->eventq))
 		rc = POLLPRI;
-	spin_unlock_bh(&client->eventq_lock);
+	mutex_unlock(&client->eventq_lock);
 	pr_debug("Exit %s\n", __func__);

@@ -1299,7 +1304,7 @@ static int synx_open(struct inode *inode, struct file *filep)
 	client->device = synx_dev;
 	init_waitqueue_head(&client->wq);
 	INIT_LIST_HEAD(&client->eventq);
-	spin_lock_init(&client->eventq_lock);
+	mutex_init(&client->eventq_lock);
 	mutex_lock(&synx_dev->table_lock);
 	list_add_tail(&client->list, &synx_dev->client_list);
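With eventq_lock turned into a mutex, the read path still drains one event at a time under the lock. A minimal sketch of that step, assuming the synx_client and synx_cb_data definitions from synx_private.h (synx_pop_event is a hypothetical helper, not a driver function):

static struct synx_cb_data *synx_pop_event(struct synx_client *client)
{
	struct synx_cb_data *entry;

	mutex_lock(&client->eventq_lock);
	entry = list_first_entry_or_null(&client->eventq,
		struct synx_cb_data, list);
	if (entry)
		list_del_init(&entry->list);
	mutex_unlock(&client->eventq_lock);

	/* NULL means the queue was empty; callers sleep on client->wq */
	return entry;
}

synx_read would then copy the popped payload to user space and free it, while synx_poll only peeks at the list under the same mutex.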
@@ -1322,7 +1327,7 @@ static void synx_object_cleanup(struct synx_client *client)
 		struct synx_table_row *row =
 			synx_dev->synx_table + i;
-		spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_lock(&synx_dev->row_locks[row->index]);
 		if (row->index) {
 			list_for_each_entry_safe(payload_info,
 				temp_payload_info,

@@ -1334,7 +1339,7 @@ static void synx_object_cleanup(struct synx_client *client)
 				}
 			}
 		}
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 	}
 }

@@ -1547,7 +1552,7 @@ static int __init synx_init(void)
 	mutex_init(&synx_dev->vtbl_lock);
 	for (idx = 0; idx < SYNX_MAX_OBJS; idx++)
-		spin_lock_init(&synx_dev->row_spinlocks[idx]);
+		mutex_init(&synx_dev->row_locks[idx]);
 	idr_init(&synx_dev->synx_ids);
 	spin_lock_init(&synx_dev->idr_lock);

drivers/media/platform/msm/synx/synx_debugfs.c (+3 −3)

@@ -74,7 +74,7 @@ static ssize_t synx_table_read(struct file *file,
 		if (!row->index)
 			continue;
-		spin_lock_bh(&dev->row_spinlocks[row->index]);
+		mutex_lock(&dev->row_locks[row->index]);
 		if (columns & NAME_COLUMN)
 			cur += scnprintf(cur, end - cur,
 				"|%10s|", row->name);

@@ -82,7 +82,7 @@ static ssize_t synx_table_read(struct file *file,
 			cur += scnprintf(cur, end - cur,
 				"|%11d|", row->num_bound_synxs);
 		if (columns & STATE_COLUMN) {
-			state = synx_status_locked(row);
+			state = synx_status(row);
 			cur += scnprintf(cur, end - cur,
 				"|%10d|", state);
 		}

@@ -101,7 +101,7 @@ static ssize_t synx_table_read(struct file *file,
 					"|0x%8x|", obj_node->synx_obj);
 			}
 		}
-		spin_unlock_bh(&dev->row_spinlocks[row->index]);
+		mutex_unlock(&dev->row_locks[row->index]);
 		cur += scnprintf(cur, end - cur, "\n");
 	}
 	if (columns & ERROR_CODES && !list_empty(

drivers/media/platform/msm/synx/synx_private.h (+6 −4)

@@ -129,6 +129,7 @@ struct synx_obj_node {
  *
  * @name            : Optional string representation of the synx object
  * @fence           : dma fence backing the synx object
+ * @spinlock        : Spinlock for the dma fence
  * @synx_obj_list   : List of synx integer handles mapped
  * @index           : Index of the spin lock table associated with synx obj
  * @num_bound_synxs : Number of external bound synx objects

@@ -141,6 +142,7 @@ struct synx_obj_node {
 struct synx_table_row {
 	char name[SYNX_OBJ_NAME_LEN];
 	struct dma_fence *fence;
+	spinlock_t *spinlock;
 	struct list_head synx_obj_list;
 	s32 index;
 	u32 num_bound_synxs;

@@ -190,7 +192,7 @@ struct synx_import_data {
  * @dev          : Device type
  * @class        : Device class
  * @synx_table   : Table of all synx objects
- * @row_spinlocks : Spinlock array, one for each row in the table
+ * @row_locks    : Mutex lock array, one for each row in the table
  * @table_lock   : Mutex used to lock the table
  * @open_cnt     : Count of file open calls made on the synx driver
  * @work_queue   : Work queue used for dispatching kernel callbacks

@@ -211,7 +213,7 @@ struct synx_device {
 	dev_t dev;
 	struct class *class;
 	struct synx_table_row synx_table[SYNX_MAX_OBJS];
-	spinlock_t row_spinlocks[SYNX_MAX_OBJS];
+	struct mutex row_locks[SYNX_MAX_OBJS];
 	struct mutex table_lock;
 	int open_cnt;
 	struct workqueue_struct *work_queue;

@@ -233,14 +235,14 @@ struct synx_device {
  * specific details
  *
  * @device      : Pointer to synx device structure
- * @eventq_lock : Spinlock for the event queue
+ * @eventq_lock : Mutex for the event queue
  * @wq          : Queue for the polling process
  * @eventq      : All the user callback payloads
  * @list        : List member used to append this node to client_list
  */
 struct synx_client {
 	struct synx_device *device;
-	spinlock_t eventq_lock;
+	struct mutex eventq_lock;
 	wait_queue_head_t wq;
 	struct list_head eventq;
 	struct list_head list;
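The new @spinlock member exists because the dma-fence core still needs a real spinlock: dma_fence_init() takes a spinlock_t * that guards the fence's internal state, so the row mutex cannot stand in for it. The synx_util.c hunks below allocate one per fence; here is a minimal sketch of that pairing, where ops and context stand in for the driver's fence ops and dma context (synx_fence_alloc_sketch is not a driver function):

/* Sketch only: pair a freshly allocated fence with its own spinlock. */
static struct dma_fence *synx_fence_alloc_sketch(
	const struct dma_fence_ops *ops, u64 context)
{
	struct dma_fence *fence;
	spinlock_t *lock;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	lock = kzalloc(sizeof(*lock), GFP_KERNEL);
	if (!lock) {
		kfree(fence);
		return NULL;
	}

	spin_lock_init(lock);
	/* the dma-fence core takes this spinlock internally, e.g. on signal */
	dma_fence_init(fence, ops, lock, context, 1);
	return fence;
}

Whoever frees the fence must also free its spinlock, which is what the added kfree(row->spinlock) in synx_deinit_object handles for non-merged objects.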
drivers/media/platform/msm/synx/synx_util.c (+41 −27)

@@ -30,6 +30,7 @@ int synx_init_object(struct synx_table_row *table,
 	struct dma_fence_ops *ops)
 {
 	struct dma_fence *fence = NULL;
+	spinlock_t *spinlock = NULL;
 	struct synx_table_row *row = table + idx;
 	struct synx_obj_node *obj_node;

@@ -40,17 +41,26 @@ int synx_init_object(struct synx_table_row *table,
 	if (!fence)
 		return -ENOMEM;
+	spinlock = kzalloc(sizeof(*spinlock), GFP_KERNEL);
+	if (!spinlock) {
+		kfree(fence);
+		return -ENOMEM;
+	}
+	spin_lock_init(spinlock);
 	obj_node = kzalloc(sizeof(*obj_node), GFP_KERNEL);
 	if (!obj_node) {
+		kfree(spinlock);
 		kfree(fence);
 		return -ENOMEM;
 	}
-	dma_fence_init(fence, ops, &synx_dev->row_spinlocks[idx],
-		synx_dev->dma_context, 1);
+	dma_fence_init(fence, ops, spinlock, synx_dev->dma_context, 1);
-	spin_lock_bh(&synx_dev->row_spinlocks[idx]);
+	mutex_lock(&synx_dev->row_locks[idx]);
 	row->fence = fence;
+	row->spinlock = spinlock;
 	obj_node->synx_obj = id;
 	row->index = idx;
 	INIT_LIST_HEAD(&row->synx_obj_list);

@@ -60,10 +70,10 @@ int synx_init_object(struct synx_table_row *table,
 	list_add(&obj_node->list, &row->synx_obj_list);
 	if (name)
 		strlcpy(row->name, name, sizeof(row->name));
-	spin_unlock_bh(&synx_dev->row_spinlocks[idx]);
+	mutex_unlock(&synx_dev->row_locks[idx]);
 	pr_debug("synx obj init: id:0x%x state:%u fence: 0x%pK\n",
-		synx_status_locked(row), fence);
+		synx_status(row), fence);
 	return 0;
 }

@@ -87,7 +97,7 @@ int synx_init_group_object(struct synx_table_row *table,
 	if (!obj_node)
 		return -ENOMEM;
-	spin_lock_bh(&synx_dev->row_spinlocks[idx]);
+	mutex_lock(&synx_dev->row_locks[idx]);
 	row->fence = &array->base;
 	obj_node->synx_obj = id;
 	row->index = idx;

@@ -96,10 +106,10 @@ int synx_init_group_object(struct synx_table_row *table,
 	INIT_LIST_HEAD(&row->user_payload_list);
 	list_add(&obj_node->list, &row->synx_obj_list);
-	spin_unlock_bh(&synx_dev->row_spinlocks[idx]);
+	mutex_unlock(&synx_dev->row_locks[idx]);
 	pr_debug("synx group obj init: id:%d state:%u fence: 0x%pK\n",
-		id, synx_status_locked(row), row->fence);
+		id, synx_status(row), row->fence);
 	return 0;
 }

@@ -114,7 +124,7 @@ void synx_callback_dispatch(struct synx_table_row *row)
 	if (!row)
 		return;
-	state = synx_status_locked(row);
+	state = synx_status(row);
 	/* dispatch the kernel callbacks registered (if any) */
 	list_for_each_entry_safe(synx_cb,

@@ -135,9 +145,9 @@ void synx_callback_dispatch(struct synx_table_row *row)
 			pr_err("invalid client member in cb list\n");
 			continue;
 		}
-		spin_lock_bh(&client->eventq_lock);
+		mutex_lock(&client->eventq_lock);
 		list_move_tail(&payload_info->list, &client->eventq);
-		spin_unlock_bh(&client->eventq_lock);
+		mutex_unlock(&client->eventq_lock);
 		/*
 		 * since cb can be registered by multiple clients,
 		 * wake the process right away

@@ -165,19 +175,20 @@ int synx_deinit_object(struct synx_table_row *row)
 	struct synx_callback_info *synx_cb, *temp_cb;
 	struct synx_cb_data *upayload_info, *temp_upayload;
 	struct synx_obj_node *obj_node, *temp_obj_node;
+	unsigned long flags;
 	if (!row || !synx_dev)
 		return -EINVAL;
 	index = row->index;
-	spin_lock_bh(&synx_dev->idr_lock);
+	spin_lock_irqsave(&synx_dev->idr_lock, flags);
 	list_for_each_entry_safe(obj_node,
 		temp_obj_node, &row->synx_obj_list, list) {
 		if ((struct synx_table_row *)idr_remove(&synx_dev->synx_ids,
 				obj_node->synx_obj) != row) {
 			pr_err("removing data in idr table failed 0x%x\n",
 				obj_node->synx_obj);
-			spin_unlock_bh(&synx_dev->idr_lock);
+			spin_unlock_irqrestore(&synx_dev->idr_lock, flags);
 			return -EINVAL;
 		}
 		pr_debug("removed synx obj at 0x%x successful\n",

@@ -185,7 +196,7 @@ int synx_deinit_object(struct synx_table_row *row)
 		list_del_init(&obj_node->list);
 		kfree(obj_node);
 	}
-	spin_unlock_bh(&synx_dev->idr_lock);
+	spin_unlock_irqrestore(&synx_dev->idr_lock, flags);
 	/*
 	 * release the fence memory only for individual obj.

@@ -193,6 +204,7 @@ int synx_deinit_object(struct synx_table_row *row)
 	 * in its registered release function.
 	 */
 	if (!is_merged_synx(row)) {
+		kfree(row->spinlock);
 		kfree(row->fence);
 	/*

@@ -212,9 +224,9 @@ int synx_deinit_object(struct synx_table_row *row)
 			pr_err("invalid client member in cb list\n");
 			continue;
 		}
-		spin_lock_bh(&client->eventq_lock);
+		mutex_lock(&client->eventq_lock);
 		list_move_tail(&upayload_info->list, &client->eventq);
-		spin_unlock_bh(&client->eventq_lock);
+		mutex_unlock(&client->eventq_lock);
 		/*
 		 * since cb can be registered by multiple clients,
 		 * wake the process right away

@@ -233,8 +245,8 @@ int synx_deinit_object(struct synx_table_row *row)
 		}
 	}
-	clear_bit(row->index, synx_dev->bitmap);
-	memset(row, 0, sizeof(*row));
+	clear_bit(index, synx_dev->bitmap);
 	pr_debug("destroying synx obj at %d successful\n", index);
 	return 0;

@@ -343,9 +355,9 @@ s32 synx_merge_error(s32 *synx_objs, u32 num_objs)
 			return -EINVAL;
 		}
-		spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_lock(&synx_dev->row_locks[row->index]);
 		synx_release_reference(row->fence);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 	}
 	return 0;

@@ -374,9 +386,9 @@ int synx_util_validate_merge(s32 *synx_objs,
 			return -EINVAL;
 		}
-		spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_lock(&synx_dev->row_locks[row->index]);
 		count += synx_add_reference(row->fence);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 	}
 	fences = kcalloc(count, sizeof(*fences), GFP_KERNEL);

@@ -395,9 +407,9 @@ int synx_util_validate_merge(s32 *synx_objs,
 			return -EINVAL;
 		}
-		spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_lock(&synx_dev->row_locks[row->index]);
 		count += synx_fence_add(row->fence, fences, count);
-		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		mutex_unlock(&synx_dev->row_locks[row->index]);
 	}
 	/* eliminate duplicates */

@@ -545,14 +557,15 @@ void *synx_from_handle(s32 synx_obj)
 {
 	s32 base;
 	struct synx_table_row *row;
+	unsigned long flags;
 	if (!synx_dev)
 		return NULL;
-	spin_lock_bh(&synx_dev->idr_lock);
+	spin_lock_irqsave(&synx_dev->idr_lock, flags);
 	row = (struct synx_table_row *)
 		idr_find(&synx_dev->synx_ids, synx_obj);
-	spin_unlock_bh(&synx_dev->idr_lock);
+	spin_unlock_irqrestore(&synx_dev->idr_lock, flags);
 	if (!row) {
 		pr_err(

@@ -575,14 +588,15 @@ s32 synx_create_handle(void *pObj)
 {
 	s32 base = current->tgid << 16;
 	s32 id;
+	unsigned long flags;
 	if (!synx_dev)
 		return -EINVAL;
-	spin_lock_bh(&synx_dev->idr_lock);
+	spin_lock_irqsave(&synx_dev->idr_lock, flags);
 	id = idr_alloc(&synx_dev->synx_ids, pObj,
 		base, base + 0x10000, GFP_ATOMIC);
-	spin_unlock_bh(&synx_dev->idr_lock);
+	spin_unlock_irqrestore(&synx_dev->idr_lock, flags);
 	pr_debug("generated Id: 0x%x, base: 0x%x, client: 0x%x\n",
 		id, base, current->tgid);