
Commit 0f571515 authored by Atsushi Nemoto, committed by Dan Williams

dmaengine: Add privatecnt to revert DMA_PRIVATE property



Currently dma_request_channel() sets the DMA_PRIVATE capability but
never clears it, so once a public channel has been grabbed by
dma_request_channel(), the device stays PRIVATE forever.  Add a
privatecnt member to dma_device so the capability can be reverted
correctly.

[lg@denx.de: fix bad usage of 'chan' in dma_async_device_register]
Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent e44e0aa3
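For context, here is a minimal sketch of how a slave-DMA client pairs these calls; the filter callback, the DMA_SLAVE capability choice, and the function names are illustrative assumptions, not part of this patch:

#include <linux/dmaengine.h>

/* Hypothetical filter: accept any offered channel (real clients match a specific one). */
static bool example_filter(struct dma_chan *chan, void *param)
{
	return true;
}

static void example_client(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Sets DMA_PRIVATE on the owning device and, with this patch,
	 * increments device->privatecnt. */
	chan = dma_request_channel(mask, example_filter, NULL);
	if (!chan)
		return;

	/* ... submit transfers on chan ... */

	/* The matching decrement added below clears DMA_PRIVATE again
	 * once the last privately requested channel is released. */
	dma_release_channel(chan);
}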
drivers/dma/dmaengine.c  +8 −0
@@ -507,6 +507,7 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 			 * published in the general-purpose allocator
 			 */
 			dma_cap_set(DMA_PRIVATE, device->cap_mask);
+			device->privatecnt++;
 			err = dma_chan_get(chan);
 
 			if (err == -ENODEV) {
@@ -518,6 +519,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 				       dma_chan_name(chan), err);
 			else
 				break;
+			if (--device->privatecnt == 0)
+				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 			chan->private = NULL;
 			chan = NULL;
 		}
@@ -537,6 +540,9 @@ void dma_release_channel(struct dma_chan *chan)
 	WARN_ONCE(chan->client_count != 1,
 		  "chan reference count %d != 1\n", chan->client_count);
 	dma_chan_put(chan);
+	/* drop PRIVATE cap enabled by __dma_request_channel() */
+	if (--chan->device->privatecnt == 0)
+		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 	chan->private = NULL;
 	mutex_unlock(&dma_list_mutex);
 }
@@ -719,6 +725,8 @@ int dma_async_device_register(struct dma_device *device)
 			}
 		}
 	list_add_tail_rcu(&device->global_node, &dma_device_list);
+	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+		device->privatecnt++;	/* Always private */
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
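As a rough, self-contained model of the accounting above (toy names only, not kernel code): DMA_PRIVATE behaves like a flag gated by a reference count, and seeding the count at registration time keeps always-private devices from ever clearing it.

/* Toy model of the privatecnt logic; all names here are illustrative. */
struct toy_device {
	unsigned int privatecnt;	/* privately requested channels */
	unsigned long cap_mask;		/* stand-in for dma_cap_mask_t */
};

#define TOY_PRIVATE	(1UL << 0)

static void toy_register(struct toy_device *dev, int always_private)
{
	if (always_private) {
		dev->cap_mask |= TOY_PRIVATE;
		dev->privatecnt++;	/* count never drops to 0, device stays private */
	}
}

static void toy_request_channel(struct toy_device *dev)
{
	dev->cap_mask |= TOY_PRIVATE;	/* dma_cap_set(DMA_PRIVATE, ...) */
	dev->privatecnt++;
}

static void toy_release_channel(struct toy_device *dev)
{
	if (--dev->privatecnt == 0)	/* last private user gone */
		dev->cap_mask &= ~TOY_PRIVATE;	/* dma_cap_clear(DMA_PRIVATE, ...) */
}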
include/linux/dmaengine.h  +9 −0
@@ -202,6 +202,7 @@ struct dma_async_tx_descriptor {
 /**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
+ * @privatecnt: how many DMA channels are requested by dma_request_channel
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
  * @cap_mask: one or more dma_capability flags
@@ -224,6 +225,7 @@ struct dma_async_tx_descriptor {
 struct dma_device {
 
 	unsigned int chancnt;
+	unsigned int privatecnt;
 	struct list_head channels;
 	struct list_head global_node;
 	dma_cap_mask_t  cap_mask;
@@ -352,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
 	set_bit(tx_type, dstp->bits);
 }
 
+#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
+static inline void
+__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+{
+	clear_bit(tx_type, dstp->bits);
+}
+
 #define dma_cap_zero(mask) __dma_cap_zero(&(mask))
 static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
 {
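The new dma_cap_clear() helper mirrors dma_cap_set(); a minimal, hypothetical caller (not from this patch) would use it like this:

	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	dma_cap_set(DMA_PRIVATE, mask);

	/* Drop a single capability bit; the other bits are untouched. */
	dma_cap_clear(DMA_PRIVATE, mask);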