drivers/bus/mhi/core/mhi_main.c  +18 −17

 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. */

 #include <linux/debugfs.h>
 #include <linux/device.h>

@@ -558,18 +558,6 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
 		mhi_tre->dword[0] = MHI_RSCTRE_DATA_DWORD0(buf_ring->wp -
 							   buf_ring->base);
 		mhi_tre->dword[1] = MHI_RSCTRE_DATA_DWORD1;
-
-		/*
-		 * on RSC channel IPA HW has a minimum credit requirement before
-		 * switching to DB mode
-		 */
-		n_free_tre = mhi_get_no_free_descriptors(mhi_dev,
-							 DMA_FROM_DEVICE);
-		n_queued_tre = tre_ring->elements - n_free_tre;
-		read_lock_bh(&mhi_chan->lock);
-		if (mhi_chan->db_cfg.db_mode &&
-		    n_queued_tre < MHI_RSC_MIN_CREDITS)
-			ring_db = false;
-		read_unlock_bh(&mhi_chan->lock);
 	} else {
 		mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
 		mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
@@ -587,11 +575,24 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
 	if (mhi_chan->dir == DMA_TO_DEVICE)
 		atomic_inc(&mhi_cntrl->pending_pkts);

-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && ring_db) {
-		read_lock_bh(&mhi_chan->lock);
+	read_lock_bh(&mhi_chan->lock);
+	if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA) {
+		/*
+		 * on RSC channel IPA HW has a minimum credit requirement before
+		 * switching to DB mode
+		 */
+		n_free_tre = mhi_get_no_free_descriptors(mhi_dev,
+							 DMA_FROM_DEVICE);
+		n_queued_tre = tre_ring->elements - n_free_tre;
+		if (mhi_chan->db_cfg.db_mode &&
+		    n_queued_tre < MHI_RSC_MIN_CREDITS)
+			ring_db = false;
+	}
+
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && ring_db)
 		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_bh(&mhi_chan->lock);
-	}
+
+	read_unlock_bh(&mhi_chan->lock);

 	read_unlock_bh(&mhi_cntrl->pm_lock);
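The net effect of the patch is easier to see outside diff context: the RSC minimum-credit check now runs under mhi_chan->lock, immediately before the doorbell decision in mhi_queue_dma(), instead of at TRE-population time. Below is a minimal, standalone C sketch of just that gating logic; the names (should_ring_db(), struct chan_state, RSC_MIN_CREDITS, the ring sizes) are illustrative stand-ins, not the MHI driver's API.

/* Simplified, standalone model of the RSC doorbell credit gate.
 * All names below are illustrative stand-ins, not the MHI driver's API.
 */
#include <stdbool.h>
#include <stdio.h>

#define RSC_MIN_CREDITS 8	/* assumed placeholder for MHI_RSC_MIN_CREDITS */

struct chan_state {
	bool db_mode;		/* channel still waiting to switch to DB mode */
	unsigned int elements;	/* total TREs in the transfer ring */
	unsigned int free_tres;	/* TREs not yet queued to the device */
	bool is_rsc;		/* receive-in-single-completion (RSC) channel */
};

/* Decide whether queuing this buffer should ring the channel doorbell.
 * On an RSC channel that is still in DB mode, hold off until enough
 * TREs are queued to satisfy the hardware's minimum credit requirement.
 */
static bool should_ring_db(const struct chan_state *c)
{
	unsigned int queued = c->elements - c->free_tres;

	if (c->is_rsc && c->db_mode && queued < RSC_MIN_CREDITS)
		return false;

	return true;
}

int main(void)
{
	struct chan_state c = {
		.db_mode = true, .elements = 64, .free_tres = 60, .is_rsc = true,
	};

	/* Only 4 TREs queued: below the credit threshold, so no doorbell yet. */
	printf("ring db? %s\n", should_ring_db(&c) ? "yes" : "no");

	/* Queue more buffers until the credit requirement is met. */
	c.free_tres = 40;	/* 24 TREs queued now */
	printf("ring db? %s\n", should_ring_db(&c) ? "yes" : "no");

	return 0;
}

In the driver itself this decision is evaluated with the channel lock held, right before mhi_ring_chan_db(), so the credit count and the db_mode flag are read consistently with the doorbell write.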