drivers/soc/qcom/glink.c (+92 −45)

@@ -84,6 +84,7 @@ struct glink_qos_priority_bin {
  * @tx_wq:	workqueue to run @tx_kwork
  * @tx_task:	handle to the running kthread
  * @channels:	list of all existing channels on this transport
+ * @dummy_in_use:	True when channels are being migrated to dummy.
  * @mtu:	MTU supported by this transport.
  * @token_count:	Number of tokens to be assigned per assignment.
  * @curr_qos_rate_kBps:	Aggregate of currently supported QoS requests.

@@ -119,6 +120,7 @@ struct glink_core_xprt_ctx {
 	struct list_head channels;
 	uint32_t next_lcid;
 	struct list_head free_lcid_list;
+	bool dummy_in_use;

 	uint32_t max_cid;
 	uint32_t max_iid;

@@ -393,7 +395,7 @@ static void ch_remove_tx_pending_remote_done(struct channel_ctx *ctx,
 static void glink_core_rx_cmd_rx_intent_req_ack(struct glink_transport_if
 					*if_ptr, uint32_t rcid, bool granted);
-static bool glink_core_remote_close_common(struct channel_ctx *ctx);
+static bool glink_core_remote_close_common(struct channel_ctx *ctx, bool safe);
 static void check_link_notifier_and_notify(struct glink_core_xprt_ctx *xprt_ptr,
 					enum glink_link_state link_state);

@@ -466,17 +468,26 @@ EXPORT_SYMBOL(glink_ssr);
  * glink_core_ch_close_ack_common() - handles the common operations during
  *                                    close ack.
  * @ctx:	Pointer to channel instance.
+ * @safe:	Is function called while holding the ctx rwref lock.
  *
  * Return: True if the channel is fully closed after the state change,
  *         false otherwise.
  */
-static bool glink_core_ch_close_ack_common(struct channel_ctx *ctx)
+static bool glink_core_ch_close_ack_common(struct channel_ctx *ctx, bool safe)
 {
 	bool is_fully_closed;

 	if (ctx == NULL)
 		return false;
-	is_fully_closed = ch_update_local_state(ctx, GLINK_CHANNEL_CLOSED);
+	if (safe) {
+		ctx->local_open_state = GLINK_CHANNEL_CLOSED;
+		is_fully_closed = ch_is_fully_closed(ctx);
+	} else {
+		is_fully_closed = ch_update_local_state(ctx,
+						GLINK_CHANNEL_CLOSED);
+	}
 	GLINK_INFO_PERF_CH(ctx,
 		"%s: local:GLINK_CHANNEL_CLOSING->GLINK_CHANNEL_CLOSED\n",
 		__func__);

@@ -497,17 +508,23 @@ static bool glink_core_ch_close_ack_common(struct channel_ctx *ctx)
  * glink_core_remote_close_common() - Handles the common operations during
  *                                    a remote close.
  * @ctx:	Pointer to channel instance.
+ * @safe:	Is function called with the ctx rwref lock already acquired.
  *
  * Return: True if the channel is fully closed after the state change,
  *         false otherwise.
  */
-static bool glink_core_remote_close_common(struct channel_ctx *ctx)
+static bool glink_core_remote_close_common(struct channel_ctx *ctx, bool safe)
 {
 	bool is_fully_closed;

 	if (ctx == NULL)
 		return false;
-	is_fully_closed = ch_update_rmt_state(ctx, false);
+	if (safe) {
+		ctx->remote_opened = false;
+		is_fully_closed = ch_is_fully_closed(ctx);
+	} else {
+		is_fully_closed = ch_update_rmt_state(ctx, false);
+	}
 	ctx->rcid = 0;

 	if (ctx->local_open_state != GLINK_CHANNEL_CLOSED &&

@@ -2508,14 +2525,20 @@ EXPORT_SYMBOL(glink_get_channel_name_for_handle);
  * information associated with it. It also adds the channel lcid to the free
  * lcid list, except when the channel is deleted in the ssr/unregister case.
  * It can only be called when the channel is fully closed.
+ *
+ * Return: true when transport_ptr->channels is empty.
  */
-static void glink_delete_ch_from_list(struct channel_ctx *ctx, bool add_flcid)
+static bool glink_delete_ch_from_list(struct channel_ctx *ctx, bool add_flcid)
 {
 	unsigned long flags;
+	bool ret = false;

 	spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
 	if (!list_empty(&ctx->port_list_node))
 		list_del_init(&ctx->port_list_node);
+	if (list_empty(&ctx->transport_ptr->channels))
+		ret = true;
 	spin_unlock_irqrestore(
 			&ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);

@@ -2525,6 +2548,7 @@ static void glink_delete_ch_from_list(struct channel_ctx *ctx, bool add_flcid)
 	glink_debugfs_remove_channel(ctx, ctx->transport_ptr);
 	mutex_unlock(&ctx->transport_ptr->xprt_dbgfs_lock_lhb4);
 	rwref_put(&ctx->ch_state_lhb2);
+	return ret;
 }

@@ -2544,6 +2568,7 @@ int glink_close(void *handle)
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
 	int ret;
 	unsigned long flags;
+	bool is_empty = false;

 	if (!ctx)
 		return -EINVAL;

@@ -2557,6 +2582,16 @@
 		return -EBUSY;
 	}

 	rwref_get(&ctx->ch_state_lhb2);
+relock: xprt_ctx = ctx->transport_ptr;
+	rwref_read_get(&xprt_ctx->xprt_state_lhb0);
+	rwref_write_get(&ctx->ch_state_lhb2);
+	if (xprt_ctx != ctx->transport_ptr) {
+		rwref_write_put(&ctx->ch_state_lhb2);
+		rwref_read_put(&xprt_ctx->xprt_state_lhb0);
+		goto relock;
+	}

 	/* Set the channel state before removing it from xprt's list(s) */
 	GLINK_INFO_PERF_CH(ctx, "%s: local:%u->GLINK_CHANNEL_CLOSING\n",

@@ -2565,33 +2600,29 @@
 	ctx->pending_delete = true;
 	ctx->int_req_ack = false;
-	complete_all(&ctx->int_req_ack_complete);
-	complete_all(&ctx->int_req_complete);

-	spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	spin_lock_irqsave(&xprt_ctx->tx_ready_lock_lhb3, flags);
 	if (!list_empty(&ctx->tx_ready_list_node))
 		list_del_init(&ctx->tx_ready_list_node);
-	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	spin_unlock_irqrestore(&xprt_ctx->tx_ready_lock_lhb3, flags);

-	if (ctx->transport_ptr->local_state != GLINK_XPRT_DOWN) {
+	if (xprt_ctx->local_state != GLINK_XPRT_DOWN) {
 		glink_qos_reset_priority(ctx);
-		ret = ctx->transport_ptr->ops->tx_cmd_ch_close(
-				ctx->transport_ptr->ops, ctx->lcid);
-	} else if (!strcmp(ctx->transport_ptr->name, "dummy")) {
+		ret = xprt_ctx->ops->tx_cmd_ch_close(xprt_ctx->ops, ctx->lcid);
+		rwref_write_put(&ctx->ch_state_lhb2);
+	} else if (!strcmp(xprt_ctx->name, "dummy")) {
 		/*
 		 * This check will avoid any race condition when clients call
 		 * glink_close before the dummy xprt swapping happens in link
 		 * down scenario.
 		 */
 		ret = 0;
-		xprt_ctx = ctx->transport_ptr;
-		rwref_write_get(&xprt_ctx->xprt_state_lhb0);
-		glink_core_ch_close_ack_common(ctx);
+		rwref_write_put(&ctx->ch_state_lhb2);
+		glink_core_ch_close_ack_common(ctx, false);
 		if (ch_is_fully_closed(ctx)) {
-			glink_delete_ch_from_list(ctx, false);
-			rwref_put(&xprt_ctx->xprt_state_lhb0);
-			if (list_empty(&xprt_ctx->channels))
+			is_empty = glink_delete_ch_from_list(ctx, false);
+			if (is_empty && !xprt_ctx->dummy_in_use)
 				/* For the xprt reference */
 				rwref_put(&xprt_ctx->xprt_state_lhb0);
 		} else {

@@ -2599,9 +2630,12 @@
 			"channel Not closed yet local state [%d] remote_state [%d]\n",
 				ctx->local_open_state, ctx->remote_opened);
 		}
-		rwref_write_put(&xprt_ctx->xprt_state_lhb0);
 	}
+	complete_all(&ctx->int_req_ack_complete);
+	complete_all(&ctx->int_req_complete);

 	rwref_put(&ctx->ch_state_lhb2);
+	rwref_read_put(&xprt_ctx->xprt_state_lhb0);
 	return ret;
 }
 EXPORT_SYMBOL(glink_close);

@@ -2662,25 +2696,25 @@ static int glink_tx_common(void *handle, void *pkt_priv,
 	if (!ctx)
 		return -EINVAL;

-	rwref_get(&ctx->ch_state_lhb2);
+	rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);
 	if (!(vbuf_provider || pbuf_provider)) {
-		rwref_put(&ctx->ch_state_lhb2);
+		rwref_read_put(&ctx->ch_state_lhb2);
 		return -EINVAL;
 	}

 	if (!ch_is_fully_opened(ctx)) {
-		rwref_put(&ctx->ch_state_lhb2);
+		rwref_read_put(&ctx->ch_state_lhb2);
 		return -EBUSY;
 	}

 	if (size > GLINK_MAX_PKT_SIZE) {
-		rwref_put(&ctx->ch_state_lhb2);
+		rwref_read_put(&ctx->ch_state_lhb2);
 		return -EINVAL;
 	}

 	if (unlikely(tx_flags & GLINK_TX_TRACER_PKT)) {
 		if (!(ctx->transport_ptr->capabilities & GCAP_TRACER_PKT)) {
-			rwref_put(&ctx->ch_state_lhb2);
+			rwref_read_put(&ctx->ch_state_lhb2);
 			return -EOPNOTSUPP;
 		}
 		tracer_pkt_log_event(data, GLINK_CORE_TX);

@@ -2693,7 +2727,7 @@
 			GLINK_ERR_CH(ctx,
 				"%s: R[%u]:%zu Intent not present for lcid\n",
 				__func__, riid, size);
-			rwref_put(&ctx->ch_state_lhb2);
+			rwref_read_put(&ctx->ch_state_lhb2);
 			return -EAGAIN;
 		}
 		if (is_atomic && !(ctx->transport_ptr->capabilities &

@@ -2701,7 +2735,7 @@
 			GLINK_ERR_CH(ctx,
 				"%s: Cannot request intent in atomic context\n",
 				__func__);
-			rwref_put(&ctx->ch_state_lhb2);
+			rwref_read_put(&ctx->ch_state_lhb2);
 			return -EINVAL;
 		}

@@ -2712,12 +2746,14 @@
 		if (ret) {
 			GLINK_ERR_CH(ctx, "%s: Request intent failed %d\n",
 					__func__, ret);
-			rwref_put(&ctx->ch_state_lhb2);
+			rwref_read_put(&ctx->ch_state_lhb2);
 			return ret;
 		}

 		while (ch_pop_remote_rx_intent(ctx, size, &riid, &intent_size)) {
+			rwref_get(&ctx->ch_state_lhb2);
+			rwref_read_put(&ctx->ch_state_lhb2);
 			if (is_atomic) {
 				GLINK_ERR_CH(ctx,
 					"%s Intent of size %zu not ready\n",

@@ -2767,6 +2803,8 @@
 			}
 			reinit_completion(&ctx->int_req_complete);
+			rwref_read_get(&ctx->ch_state_lhb2);
+			rwref_put(&ctx->ch_state_lhb2);
 		}
 	}

@@ -2786,7 +2824,7 @@
 	if (!tx_info) {
 		GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
 		ch_push_remote_rx_intent(ctx, intent_size, riid);
-		rwref_put(&ctx->ch_state_lhb2);
+		rwref_read_put(&ctx->ch_state_lhb2);
 		return -ENOMEM;
 	}
 	rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);

@@ -2812,7 +2850,7 @@
 	else
 		xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);

-	rwref_put(&ctx->ch_state_lhb2);
+	rwref_read_put(&ctx->ch_state_lhb2);
 	return ret;
 }
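A note on the relock loop added to glink_close() above: glink_core_channel_cleanup() (later in this diff) can repoint ctx->transport_ptr at the dummy transport while a close is in flight, so the close path snapshots the pointer, locks through the snapshot, and retries if the pointer moved. The following is a minimal self-contained model of that idiom in userspace C with pthreads; struct xprt, struct channel, and lock_channel_xprt() are illustrative stand-ins, not the driver's types (and the unlocked read of ch->xprt is tolerable only because the retry re-validates it, as in the patch):

#include <pthread.h>

struct xprt {
	pthread_rwlock_t state_lock;	/* models xprt_state_lhb0 */
};

struct channel {
	pthread_mutex_t lock;		/* models ch_state_lhb2 */
	struct xprt *xprt;		/* may be repointed by migration */
};

/* Lock through a snapshot of ch->xprt; retry if migration moved it. */
static struct xprt *lock_channel_xprt(struct channel *ch)
{
	struct xprt *x;

	for (;;) {
		x = ch->xprt;				/* snapshot */
		pthread_rwlock_rdlock(&x->state_lock);	/* lock snapshot */
		pthread_mutex_lock(&ch->lock);
		if (x == ch->xprt)
			return x;			/* still current */
		pthread_mutex_unlock(&ch->lock);	/* raced: drop both */
		pthread_rwlock_unlock(&x->state_lock);	/* and retry */
	}
}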
@@ -3859,6 +3897,7 @@ static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx(
 	xprt_ptr->local_state = GLINK_XPRT_DOWN;
 	xprt_ptr->remote_neg_completed = false;
 	INIT_LIST_HEAD(&xprt_ptr->channels);
+	xprt_ptr->dummy_in_use = true;
 	spin_lock_init(&xprt_ptr->tx_ready_lock_lhb3);
 	mutex_init(&xprt_ptr->xprt_dbgfs_lock_lhb4);
 	return xprt_ptr;

@@ -3884,41 +3923,49 @@ static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
 		return;
 	}
-	rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
+	rwref_read_get(&dummy_xprt_ctx->xprt_state_lhb0);
+	rwref_read_get(&xprt_ptr->xprt_state_lhb0);
+	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
 	spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
 	list_for_each_entry_safe(ctx, tmp_ctx, &xprt_ptr->channels,
 			port_list_node) {
-		rwref_get(&ctx->ch_state_lhb2);
+		rwref_write_get_atomic(&ctx->ch_state_lhb2, true);
 		if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
 			ctx->local_open_state == GLINK_CHANNEL_OPENING) {
 			rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
-			spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
-						d_flags);
 			list_move_tail(&ctx->port_list_node,
 					&dummy_xprt_ctx->channels);
-			spin_unlock_irqrestore(
-				&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
 			ctx->transport_ptr = dummy_xprt_ctx;
+			rwref_write_put(&ctx->ch_state_lhb2);
 		} else {
 			/* local state is in either CLOSED or CLOSING */
 			spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1,
 						flags);
-			glink_core_remote_close_common(ctx);
+			spin_unlock_irqrestore(
+				&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+			glink_core_remote_close_common(ctx, true);
 			if (ctx->local_open_state == GLINK_CHANNEL_CLOSING)
-				glink_core_ch_close_ack_common(ctx);
+				glink_core_ch_close_ack_common(ctx, true);
 			/* Channel should be fully closed now. Delete here */
 			if (ch_is_fully_closed(ctx))
 				glink_delete_ch_from_list(ctx, false);
+			rwref_write_put(&ctx->ch_state_lhb2);
+			spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
+						d_flags);
 			spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1,
 						flags);
 		}
-		rwref_put(&ctx->ch_state_lhb2);
 	}
 	list_for_each_entry_safe(temp_lcid, temp_lcid1,
 			&xprt_ptr->free_lcid_list, list_node) {
 		list_del(&temp_lcid->list_node);
 		kfree(&temp_lcid->list_node);
 	}
+	dummy_xprt_ctx->dummy_in_use = false;
 	spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+	spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+	rwref_read_put(&xprt_ptr->xprt_state_lhb0);

 	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
 	list_for_each_entry_safe(ctx, tmp_ctx, &dummy_xprt_ctx->channels,

@@ -3926,13 +3973,13 @@ static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
 		rwref_get(&ctx->ch_state_lhb2);
 		spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
 				d_flags);
-		glink_core_remote_close_common(ctx);
+		glink_core_remote_close_common(ctx, false);
 		spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
 				d_flags);
 		rwref_put(&ctx->ch_state_lhb2);
 	}
 	spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
-	rwref_put(&dummy_xprt_ctx->xprt_state_lhb0);
+	rwref_read_put(&dummy_xprt_ctx->xprt_state_lhb0);
 }

 /**
  * glink_core_rx_cmd_version() - receive version/features from remote system

@@ -4597,7 +4644,7 @@ static void glink_core_rx_cmd_ch_remote_close(
 	}
 	GLINK_INFO_CH(ctx, "%s: remote: OPENED->CLOSED\n", __func__);
-	is_ch_fully_closed = glink_core_remote_close_common(ctx);
+	is_ch_fully_closed = glink_core_remote_close_common(ctx, false);
 	ctx->pending_delete = true;
 	if_ptr->tx_cmd_ch_remote_close_ack(if_ptr, rcid);

@@ -4639,7 +4686,7 @@ static void glink_core_rx_cmd_ch_close_ack(struct glink_transport_if *if_ptr,
 		return;
 	}
-	is_ch_fully_closed = glink_core_ch_close_ack_common(ctx);
+	is_ch_fully_closed = glink_core_ch_close_ack_common(ctx, false);
 	if (is_ch_fully_closed) {
 		glink_delete_ch_from_list(ctx, true);
 		flush_kthread_worker(&xprt_ptr->tx_wq);
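A note on the new bool safe parameter threaded through glink_core_remote_close_common() and glink_core_ch_close_ack_common(): it is the usual locked/unlocked split folded into one function. With safe == true the caller (here glink_core_channel_cleanup(), which already holds the channel's rwref write lock via rwref_write_get_atomic()) gets the raw state update; with safe == false the function takes the lock itself through ch_update_*_state(). A minimal self-contained sketch of the same pattern, with hypothetical channel fields and pthreads in place of the rwref lock:

#include <pthread.h>
#include <stdbool.h>

struct channel {
	pthread_mutex_t lock;
	bool remote_opened;
	int local_state;	/* assume CLOSED == 0 for this sketch */
};

/* "Safe" path: caller already holds ch->lock. */
static bool channel_remote_close_locked(struct channel *ch)
{
	ch->remote_opened = false;
	return ch->local_state == 0;	/* fully closed? */
}

/* Single entry point selecting the locked or unlocked body. */
static bool channel_remote_close(struct channel *ch, bool safe)
{
	bool fully_closed;

	if (safe)
		return channel_remote_close_locked(ch);
	pthread_mutex_lock(&ch->lock);
	fully_closed = channel_remote_close_locked(ch);
	pthread_mutex_unlock(&ch->lock);
	return fully_closed;
}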
drivers/soc/qcom/glink_private.h (+45 −11)

-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and

@@ -22,6 +22,7 @@
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
+#include <linux/sched.h>
 #include <soc/qcom/glink.h>

 struct glink_core_xprt_ctx;

@@ -889,7 +890,7 @@ struct rwref_lock {
 	unsigned read_count;
 	unsigned write_count;
 	spinlock_t lock;
-	struct completion count_zero;
+	wait_queue_head_t count_zero;

 	void (*release)(struct rwref_lock *);
 };

@@ -923,7 +924,7 @@ static inline void rwref_lock_init(struct rwref_lock *lock_ptr,
 	lock_ptr->read_count = 0;
 	lock_ptr->write_count = 0;
 	spin_lock_init(&lock_ptr->lock);
-	init_completion(&lock_ptr->count_zero);
+	init_waitqueue_head(&lock_ptr->count_zero);
 	lock_ptr->release = release;
 }

@@ -952,12 +953,14 @@ static inline void rwref_put(struct rwref_lock *lock_ptr)
 }

 /**
- * rwref_read_get() - gains a reference count for a read operation
+ * rwref_read_get_atomic() - gains a reference count for a read operation
  * lock_ptr:	pointer to lock structure
+ * is_atomic:	if true, do not wait when acquiring the lock
  *
  * Multiple readers may acquire the lock as long as the write count is zero.
  */
-static inline void rwref_read_get(struct rwref_lock *lock_ptr)
+static inline void rwref_read_get_atomic(struct rwref_lock *lock_ptr,
+					 bool is_atomic)
 {
 	unsigned long flags;

@@ -972,8 +975,22 @@ static inline void rwref_read_get(struct rwref_lock *lock_ptr)
 			break;
 		}
 		spin_unlock_irqrestore(&lock_ptr->lock, flags);
-		wait_for_completion(&lock_ptr->count_zero);
+		if (!is_atomic) {
+			wait_event(lock_ptr->count_zero,
+				   lock_ptr->write_count == 0);
+		}
 	}
 }
+
+/**
+ * rwref_read_get() - gains a reference count for a read operation
+ * lock_ptr:	pointer to lock structure
+ *
+ * Multiple readers may acquire the lock as long as the write count is zero.
+ */
+static inline void rwref_read_get(struct rwref_lock *lock_ptr)
+{
+	rwref_read_get_atomic(lock_ptr, false);
+}

 /**

@@ -991,18 +1008,20 @@
 	spin_lock_irqsave(&lock_ptr->lock, flags);
 	BUG_ON(lock_ptr->read_count == 0);
 	if (--lock_ptr->read_count == 0)
-		complete(&lock_ptr->count_zero);
+		wake_up(&lock_ptr->count_zero);
 	spin_unlock_irqrestore(&lock_ptr->lock, flags);
 	kref_put(&lock_ptr->kref, rwref_lock_release);
 }

 /**
- * rwref_write_get() - gains a reference count for a write operation
+ * rwref_write_get_atomic() - gains a reference count for a write operation
  * lock_ptr:	pointer to lock structure
+ * is_atomic:	if true, do not wait when acquiring the lock
  *
  * Only one writer may acquire the lock as long as the reader count is zero.
  */
-static inline void rwref_write_get(struct rwref_lock *lock_ptr)
+static inline void rwref_write_get_atomic(struct rwref_lock *lock_ptr,
+					  bool is_atomic)
 {
 	unsigned long flags;

@@ -1017,8 +1036,23 @@
 			break;
 		}
 		spin_unlock_irqrestore(&lock_ptr->lock, flags);
-		wait_for_completion(&lock_ptr->count_zero);
+		if (!is_atomic) {
+			wait_event(lock_ptr->count_zero,
+				   (lock_ptr->read_count == 0 &&
+				    lock_ptr->write_count == 0));
+		}
 	}
 }
+
+/**
+ * rwref_write_get() - gains a reference count for a write operation
+ * lock_ptr:	pointer to lock structure
+ *
+ * Only one writer may acquire the lock as long as the reader count is zero.
+ */
+static inline void rwref_write_get(struct rwref_lock *lock_ptr)
+{
+	rwref_write_get_atomic(lock_ptr, false);
+}

 /**

@@ -1036,7 +1070,7 @@
 	spin_lock_irqsave(&lock_ptr->lock, flags);
 	BUG_ON(lock_ptr->write_count != 1);
 	if (--lock_ptr->write_count == 0)
-		complete(&lock_ptr->count_zero);
+		wake_up(&lock_ptr->count_zero);
 	spin_unlock_irqrestore(&lock_ptr->lock, flags);
 	kref_put(&lock_ptr->kref, rwref_lock_release);
 }
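Two notes on this header change. First, the switch from struct completion to wait_queue_head_t: complete() hands one wake token to one waiter, so with several readers and writers blocked on count_zero and the completion being re-armed in a loop, a wakeup can be consumed by the wrong waiter and lost; wake_up() wakes every sleeper on the queue and wait_event() makes each one re-check its own predicate before proceeding. Second, the _atomic variants never call wait_event(): when is_atomic is true, the acquire loop polls the counts under the internal spinlock, which is what makes them callable in atomic context. That is exactly what glink_core_channel_cleanup() relies on; the call shape below is simplified from the glink.c hunk above (not a literal excerpt; migration and error handling omitted):

/* Simplified from glink_core_channel_cleanup(). */
spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
list_for_each_entry_safe(ctx, tmp_ctx, &xprt_ptr->channels,
			 port_list_node) {
	/* Spinlock held, IRQs off: must not sleep, so is_atomic == true. */
	rwref_write_get_atomic(&ctx->ch_state_lhb2, true);
	/* ... migrate the channel to the dummy xprt or close it ... */
	rwref_write_put(&ctx->ch_state_lhb2);
}
spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);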