Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f5073345 authored by Linus Torvalds
Browse files
* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
  shdma: fix initialization error handling
  ioat3: fix pq completion versus channel deallocation race
  async_tx: build-time toggling of async_{syndrome,xor}_val dma support
  dmaengine: include xor/pq validate in device_has_all_tx_types()
  ioat2,3: report all uncorrectable errors
  ioat3: specify valid address for disabled-Q or disabled-P
  ioat2,3: disable asynchronous error notifications
  ioat3: dca and raid operations are incompatible
  ioat: silence "dca disabled" messages
parents 50b767d0 56adf7e8
Loading
Loading
Loading
Loading
+5 −0
Original line number Original line Diff line number Diff line
@@ -23,3 +23,8 @@ config ASYNC_RAID6_RECOV
	select ASYNC_CORE
	select ASYNC_CORE
	select ASYNC_PQ
	select ASYNC_PQ


config ASYNC_TX_DISABLE_PQ_VAL_DMA
	bool

config ASYNC_TX_DISABLE_XOR_VAL_DMA
	bool
+11 −3
Original line number Original line Diff line number Diff line
@@ -240,6 +240,16 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
}
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
EXPORT_SYMBOL_GPL(async_gen_syndrome);


/*
 * Select a DMA channel offering the PQ-validate capability for this
 * request, or NULL when CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA is set and
 * the caller must take the synchronous (software) validation path.
 */
static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
#else
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
				     disks, len);
#endif
}

/**
/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
@@ -260,9 +270,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit)
		   struct async_submit_ctl *submit)
{
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
						      NULL, 0,  blocks, disks,
						      len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[disks-2];
	unsigned char coefs[disks-2];
+12 −3
Original line number Original line Diff line number Diff line
@@ -234,6 +234,17 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
		memcmp(a, a + 4, len - 4) == 0);
		memcmp(a, a + 4, len - 4) == 0);
}
}


/*
 * Select a DMA channel offering the XOR-validate capability for this
 * request, or NULL when CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA is set and
 * the caller must take the synchronous (software) validation path.
 */
static inline struct dma_chan *
xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
		 struct page **src_list, int src_cnt, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	return NULL;
#else
	return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
				     src_cnt, len);
#endif
}

/**
/**
 * async_xor_val - attempt a xor parity check with a dma engine.
 * async_xor_val - attempt a xor parity check with a dma engine.
 * @dest: destination page used if the xor is performed synchronously
 * @dest: destination page used if the xor is performed synchronously
@@ -255,9 +266,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
	      int src_cnt, size_t len, enum sum_check_flags *result,
	      int src_cnt, size_t len, enum sum_check_flags *result,
	      struct async_submit_ctl *submit)
	      struct async_submit_ctl *submit)
{
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
	struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
						      &dest, 1, src_list,
						      src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_async_tx_descriptor *tx = NULL;
	dma_addr_t *dma_src = NULL;
	dma_addr_t *dma_src = NULL;
+2 −0
Original line number Original line Diff line number Diff line
@@ -26,6 +26,8 @@ config INTEL_IOATDMA
	select DMA_ENGINE
	select DMA_ENGINE
	select DCA
	select DCA
	select ASYNC_TX_DISABLE_CHANNEL_SWITCH
	select ASYNC_TX_DISABLE_CHANNEL_SWITCH
	select ASYNC_TX_DISABLE_PQ_VAL_DMA
	select ASYNC_TX_DISABLE_XOR_VAL_DMA
	help
	help
	  Enable support for the Intel(R) I/OAT DMA engine present
	  Enable support for the Intel(R) I/OAT DMA engine present
	  in recent Intel Xeon chipsets.
	  in recent Intel Xeon chipsets.
+10 −0
Original line number Original line Diff line number Diff line
@@ -632,11 +632,21 @@ static bool device_has_all_tx_types(struct dma_device *device)
	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif
	#endif


	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif
	#endif


	return true;
	return true;
Loading