Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 138f4c35 authored by Dan Williams
Browse files

dmaengine, async_tx: add a "no channel switch" allocator



Channel switching is problematic for some dmaengine drivers as the
architecture precludes separating the ->prep from ->submit.  In these
cases the driver can select ASYNC_TX_DISABLE_CHANNEL_SWITCH to modify
the async_tx allocator to only return channels that support all of the
required asynchronous operations.

For example MD_RAID456=y selects support for asynchronous xor, xor
validate, pq, pq validate, and memcpy.  When
ASYNC_TX_DISABLE_CHANNEL_SWITCH=y any channel with all these
capabilities is marked DMA_ASYNC_TX allowing async_tx_find_channel() to
quickly locate compatible channels with the guarantee that dependency
chains will remain on one channel.  When
ASYNC_TX_DISABLE_CHANNEL_SWITCH=n async_tx_find_channel() may select
channels that lead to operation chains that need to cross channel
boundaries using the async_tx channel switch capability.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 0403e382
Loading
Loading
Loading
Loading
+4 −0
Original line number Original line Diff line number Diff line
@@ -81,6 +81,10 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
	struct dma_device *device = chan->device;
	struct dma_device *device = chan->device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;


	#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
	BUG();
	#endif

	/* first check to see if we can still append to depend_tx */
	/* first check to see if we can still append to depend_tx */
	spin_lock_bh(&depend_tx->lock);
	spin_lock_bh(&depend_tx->lock);
	if (depend_tx->parent && depend_tx->chan == tx->chan) {
	if (depend_tx->parent && depend_tx->chan == tx->chan) {
+4 −0
Original line number Original line Diff line number Diff line
@@ -17,11 +17,15 @@ if DMADEVICES


comment "DMA Devices"
comment "DMA Devices"


config ASYNC_TX_DISABLE_CHANNEL_SWITCH
	bool

config INTEL_IOATDMA
config INTEL_IOATDMA
	tristate "Intel I/OAT DMA support"
	tristate "Intel I/OAT DMA support"
	depends on PCI && X86
	depends on PCI && X86
	select DMA_ENGINE
	select DMA_ENGINE
	select DCA
	select DCA
	select ASYNC_TX_DISABLE_CHANNEL_SWITCH
	help
	help
	  Enable support for the Intel(R) I/OAT DMA engine present
	  Enable support for the Intel(R) I/OAT DMA engine present
	  in recent Intel Xeon chipsets.
	  in recent Intel Xeon chipsets.
+40 −0
Original line number Original line Diff line number Diff line
@@ -608,6 +608,40 @@ void dmaengine_put(void)
}
}
EXPORT_SYMBOL(dmaengine_put);
EXPORT_SYMBOL(dmaengine_put);


/*
 * device_has_all_tx_types - check whether @device covers every operation
 * type that the async_tx api has been configured (via Kconfig) to offload
 *
 * Returns true only if, for each async_tx operation class that is enabled
 * in the kernel configuration, the device advertises the corresponding
 * capability bit in its cap_mask.  Each check is compiled in only when the
 * matching CONFIG_ASYNC_* option is built-in or modular, so an operation
 * type that is not configured can never disqualify a device.
 */
static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	/* NOTE(review): DMA_INTERRUPT is presumably required for dependency
	 * signalling between operations — confirm against async_tx core.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	/* memcpy offload configured -> device must support DMA_MEMCPY */
	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	/* memset offload configured -> device must support DMA_MEMSET */
	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
		return false;
	#endif

	/* xor offload configured -> device must support DMA_XOR */
	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;
	#endif

	/* pq (raid6 p+q) offload configured -> device must support DMA_PQ */
	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;
	#endif

	/* All configured operation types are covered by this device */
	return true;
}

static int get_dma_id(struct dma_device *device)
static int get_dma_id(struct dma_device *device)
{
{
	int rc;
	int rc;
@@ -665,6 +699,12 @@ int dma_async_device_register(struct dma_device *device)
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);
	BUG_ON(!device->dev);


	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
	if (!idr_ref)
		return -ENOMEM;
		return -ENOMEM;
+9 −1
Original line number Original line Diff line number Diff line
@@ -48,6 +48,9 @@ enum dma_status {


/**
/**
 * enum dma_transaction_type - DMA transaction types/indexes
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */
 */
enum dma_transaction_type {
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_MEMCPY,
@@ -61,6 +64,7 @@ enum dma_transaction_type {
	DMA_MEMCPY_CRC32C,
	DMA_MEMCPY_CRC32C,
	DMA_INTERRUPT,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_PRIVATE,
	DMA_ASYNC_TX,
	DMA_SLAVE,
	DMA_SLAVE,
};
};


@@ -396,7 +400,11 @@ static inline void net_dmaengine_put(void)
#ifdef CONFIG_ASYNC_TX_DMA
#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#define async_dmaengine_put()	dmaengine_put()
#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#define async_dma_find_channel(type) dma_find_channel(type)
#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
#else
#else
static inline void async_dmaengine_get(void)
static inline void async_dmaengine_get(void)
{
{
@@ -409,7 +417,7 @@ async_dma_find_channel(enum dma_transaction_type type)
{
{
	return NULL;
	return NULL;
}
}
#endif
#endif /* CONFIG_ASYNC_TX_DMA */


dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
	void *dest, void *src, size_t len);