
Commit a487b670 authored by Linus Torvalds

Merge branch 'for-linus' of git://neil.brown.name/md

* 'for-linus' of git://neil.brown.name/md: (97 commits)
  md: raid-1/10: fix RW bits manipulation
  md: remove unnecessary memset from multipath.
  md: report device as congested when suspended
  md: Improve name of threads created by md_register_thread
  md: remove sparse warnings about lock context.
  md: remove sparse waring "symbol xxx shadows an earlier one"
  async_tx/raid6: add missing dma_unmap calls to the async fail case
  ioat3: fix uninitialized var warnings
  drivers/dma/ioat/dma_v2.c: fix warnings
  raid6test: fix stack overflow
  ioat2: clarify ring size limits
  md/raid6: cleanup ops_run_compute6_2
  md/raid6: eliminate BUG_ON with side effect
  dca: module load should not be an error message
  ioat: driver version 4.0
  dca: registering requesters in multiple dca domains
  async_tx: remove HIGHMEM64G restriction
  dmaengine: sh: Add Support SuperH DMA Engine driver
  dmaengine: Move all map_sg/unmap_sg for slave channel to its client
  fsldma: Add DMA_SLAVE support
  ...
parents 9f6ac785 4b3df566

File 1: +45 −30
@@ -54,20 +54,23 @@ features surfaced as a result:
 
 3.1 General format of the API:
 struct dma_async_tx_descriptor *
-async_<operation>(<op specific parameters>,
-                  enum async_tx_flags flags,
-                  struct dma_async_tx_descriptor *dependency,
-                  dma_async_tx_callback callback_routine,
-                  void *callback_parameter);
+async_<operation>(<op specific parameters>, struct async_submit_ctl *submit)
 
 3.2 Supported operations:
 memcpy  - memory copy between a source and a destination buffer
 memset  - fill a destination buffer with a byte value
 xor     - xor a series of source buffers and write the result to a
           destination buffer
-xor_zero_sum - xor a series of source buffers and set a flag if the
+xor_val - xor a series of source buffers and set a flag if the
           result is zero.  The implementation attempts to prevent
           writes to memory
+pq      - generate the p+q (raid6 syndrome) from a series of source buffers
+pq_val  - validate that a p and or q buffer are in sync with a given series of
+          sources
+datap   - (raid6_datap_recov) recover a raid6 data block and the p block
+          from the given sources
+2data   - (raid6_2data_recov) recover 2 raid6 data blocks from the given
+          sources
 
 3.3 Descriptor management:
 The return value is non-NULL and points to a 'descriptor' when the operation
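
The pq and pq_val entries added above correspond to the raid6 syndrome helpers introduced by this series (async_gen_syndrome() and async_syndrome_val() in crypto/async_tx/). A minimal sketch of driving them with the new async_submit_ctl style follows; disks, len, blocks and spare_page are placeholder variables for the sketch, and the exact argument order should be checked against include/linux/async_tx.h:

        struct async_submit_ctl submit;
        struct dma_async_tx_descriptor *tx;
        enum sum_check_flags pqres = 0;
        addr_conv_t addr_conv[disks];

        /* generate P+Q over the data blocks; by convention P and Q occupy
         * the last two slots of the blocks[] array */
        init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, addr_conv);
        tx = async_gen_syndrome(blocks, 0, disks, len, &submit);

        /* re-check the just-written P/Q against the data blocks */
        init_async_submit(&submit, ASYNC_TX_ACK, tx, NULL, NULL, addr_conv);
        tx = async_syndrome_val(blocks, 0, disks, len, &pqres, spare_page,
                                &submit);

On completion pqres is zero when both P and Q match, mirroring the xor_val semantics described above.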
@@ -80,8 +83,8 @@ acknowledged by the application before the offload engine driver is allowed to
 recycle (or free) the descriptor.  A descriptor can be acked by one of the
 following methods:
 1/ setting the ASYNC_TX_ACK flag if no child operations are to be submitted
-2/ setting the ASYNC_TX_DEP_ACK flag to acknowledge the parent
-   descriptor of a new operation.
+2/ submitting an unacknowledged descriptor as a dependency to another
+   async_tx call will implicitly set the acknowledged state.
 3/ calling async_tx_ack() on the descriptor.
 
 3.4 When does the operation execute?
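
A short sketch of the three acknowledgement paths above, using the helpers this series introduces (the descriptor and buffer variables are placeholders for illustration, not part of the patch):

        struct async_submit_ctl submit;
        struct dma_async_tx_descriptor *tx1, *tx2;

        /* tx1 is left unacknowledged because a dependent operation follows */
        init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
        tx1 = async_memcpy(dest, src, 0, 0, len, &submit);

        /* method 2: passing tx1 as the dependency of this call implicitly
         * acks it; method 1: ASYNC_TX_ACK acks tx2 at submit time since no
         * child operation will be chained to it */
        init_async_submit(&submit, ASYNC_TX_ACK, tx1, NULL, NULL, addr_conv);
        tx2 = async_xor(dest, srcs, 0, src_cnt, len, &submit);

        /* method 3: had tx2 been submitted without ASYNC_TX_ACK, an explicit
         * async_tx_ack(tx2) would release it instead */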
@@ -119,12 +122,14 @@ of an operation.
 Perform a xor->copy->xor operation where each operation depends on the
 result from the previous operation:
 
-void complete_xor_copy_xor(void *param)
+void callback(void *param)
 {
-        printk("complete\n");
+        struct completion *cmp = param;
+
+        complete(cmp);
 }
 
-int run_xor_copy_xor(struct page **xor_srcs,
+void run_xor_copy_xor(struct page **xor_srcs,
                       int xor_src_cnt,
                       struct page *xor_dest,
                       size_t xor_len,
@@ -133,16 +138,26 @@ int run_xor_copy_xor(struct page **xor_srcs,
                       size_t copy_len)
 {
         struct dma_async_tx_descriptor *tx;
+        addr_conv_t addr_conv[xor_src_cnt];
+        struct async_submit_ctl submit;
+        addr_conv_t addr_conv[NDISKS];
+        struct completion cmp;
+
+        init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL,
+                          addr_conv);
+        tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, &submit)
 
-        tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
-                       ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL);
-        tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len,
-                          ASYNC_TX_DEP_ACK, tx, NULL, NULL);
-        tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
-                       ASYNC_TX_XOR_DROP_DST | ASYNC_TX_DEP_ACK | ASYNC_TX_ACK,
-                       tx, complete_xor_copy_xor, NULL);
+        submit->depend_tx = tx;
+        tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, &submit);
+
+        init_completion(&cmp);
+        init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK, tx,
+                          callback, &cmp, addr_conv);
+        tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, &submit);
 
         async_tx_issue_pending_all();
 
+        wait_for_completion(&cmp);
 }
 
 See include/linux/async_tx.h for more information on the flags.  See the
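
For readers who want to lift the example above into their own code, here is a cleaned-up sketch derived from it. It keeps the same call sequence but drops the duplicated addr_conv declaration, adds the missing semicolon after the first async_xor() call, and sets depend_tx through the struct rather than a pointer; NDISKS is a placeholder sizing constant introduced only for this sketch:

        #define NDISKS 16       /* placeholder size for the sketch */

        void callback(void *param)
        {
                struct completion *cmp = param;

                complete(cmp);
        }

        void run_xor_copy_xor(struct page **xor_srcs, int xor_src_cnt,
                              struct page *xor_dest, size_t xor_len,
                              struct page *copy_src, struct page *copy_dest,
                              size_t copy_len)
        {
                struct dma_async_tx_descriptor *tx;
                struct async_submit_ctl submit;
                addr_conv_t addr_conv[NDISKS]; /* scribble space for dma/page
                                                * address conversion */
                struct completion cmp;

                /* first xor: no dependency, result lands in xor_dest */
                init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, NULL,
                                  NULL, addr_conv);
                tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
                               &submit);

                /* the copy depends on the xor above */
                submit.depend_tx = tx;
                tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, &submit);

                /* final xor: ack the chain and signal completion via
                 * callback() */
                init_completion(&cmp);
                init_async_submit(&submit,
                                  ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK, tx,
                                  callback, &cmp, addr_conv);
                tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
                               &submit);

                async_tx_issue_pending_all();

                wait_for_completion(&cmp);
        }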

File 2: +79 −2
@@ -187,11 +187,74 @@ union iop3xx_desc {
         void *ptr;
 };
 
+/* No support for p+q operations */
+static inline int
+iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
+{
+        BUG();
+        return 0;
+}
+
+static inline void
+iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
+                  unsigned long flags)
+{
+        BUG();
+}
+
+static inline void
+iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
+{
+        BUG();
+}
+
+static inline void
+iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
+                         dma_addr_t addr, unsigned char coef)
+{
+        BUG();
+}
+
+static inline int
+iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
+{
+        BUG();
+        return 0;
+}
+
+static inline void
+iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+                          unsigned long flags)
+{
+        BUG();
+}
+
+static inline void
+iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
+{
+        BUG();
+}
+
+#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
+
+static inline void
+iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
+                              dma_addr_t *src)
+{
+        BUG();
+}
+
 static inline int iop_adma_get_max_xor(void)
 {
         return 32;
 }
 
+static inline int iop_adma_get_max_pq(void)
+{
+        BUG();
+        return 0;
+}
+
 static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
 {
         int id = chan->device->id;
@@ -332,6 +395,11 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
         return slot_cnt;
 }
 
+static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
+{
+        return 0;
+}
+
 static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
                                         struct iop_adma_chan *chan)
 {
@@ -349,6 +417,14 @@ static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
         return 0;
 }
 
+
+static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
+                                          struct iop_adma_chan *chan)
+{
+        BUG();
+        return 0;
+}
+
 static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
                                         struct iop_adma_chan *chan)
 {
@@ -756,13 +832,14 @@ static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
         hw_desc->src[0] = val;
 }
 
-static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
+static inline enum sum_check_flags
+iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
 {
         struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
         struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
 
         iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
-        return desc_ctrl.zero_result_err;
+        return desc_ctrl.zero_result_err << SUM_CHECK_P;
 }
 
 static inline void iop_chan_append(struct iop_adma_chan *chan)
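
The change above converts iop_desc_get_zero_result() from returning a bare int to returning enum sum_check_flags, where (per the SUM_CHECK_P/SUM_CHECK_Q shifts visible in the diff) bit 0 reports a P/xor mismatch and bit 1 a Q mismatch. A hedged sketch of how a validation result might be consumed; SUM_CHECK_P_RESULT and SUM_CHECK_Q_RESULT are, as far as I can tell, the mask names this series adds in the dmaengine headers:

        enum sum_check_flags result = 0;

        /* ... submit an xor_val / pq_val operation that fills in 'result'
         * on completion ... */

        if (result & SUM_CHECK_P_RESULT)
                pr_err("P (xor) parity mismatch\n");
        if (result & SUM_CHECK_Q_RESULT)
                pr_err("Q (raid6 syndrome) mismatch\n");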

File 3: +3 −0
@@ -86,6 +86,7 @@ struct iop_adma_chan {
  * @idx: pool index
  * @unmap_src_cnt: number of xor sources
  * @unmap_len: transaction bytecount
+ * @tx_list: list of descriptors that are associated with one operation
  * @async_tx: support for the async_tx api
  * @group_list: list of slots that make up a multi-descriptor transaction
  *	for example transfer lengths larger than the supported hw max
@@ -102,10 +103,12 @@ struct iop_adma_desc_slot {
         u16 idx;
         u16 unmap_src_cnt;
         size_t unmap_len;
+        struct list_head tx_list;
         struct dma_async_tx_descriptor async_tx;
         union {
                 u32 *xor_check_result;
                 u32 *crc32_result;
+                u32 *pq_check_result;
         };
 };
 

File 4: +114 −5
@@ -150,6 +150,8 @@ static inline int iop_adma_get_max_xor(void)
         return 16;
 }
 
+#define iop_adma_get_max_pq iop_adma_get_max_xor
+
 static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
 {
         return __raw_readl(ADMA_ADAR(chan));
@@ -211,7 +213,10 @@ iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op)
 #define IOP_ADMA_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
 #define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
 #define IOP_ADMA_XOR_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
+#define IOP_ADMA_PQ_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
 #define iop_chan_zero_sum_slot_count(l, s, o) iop_chan_xor_slot_count(l, s, o)
+#define iop_chan_pq_slot_count iop_chan_xor_slot_count
+#define iop_chan_pq_zero_sum_slot_count iop_chan_xor_slot_count
 
 static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
                                         struct iop_adma_chan *chan)
@@ -220,6 +225,13 @@ static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
         return hw_desc->dest_addr;
 }
 
+static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
+                                          struct iop_adma_chan *chan)
+{
+        struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+        return hw_desc->q_dest_addr;
+}
+
 static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
                                         struct iop_adma_chan *chan)
 {
@@ -319,6 +331,58 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
         return 1;
 }
 
+static inline void
+iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
+                  unsigned long flags)
+{
+        struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+        union {
+                u32 value;
+                struct iop13xx_adma_desc_ctrl field;
+        } u_desc_ctrl;
+
+        u_desc_ctrl.value = 0;
+        u_desc_ctrl.field.src_select = src_cnt - 1;
+        u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
+        u_desc_ctrl.field.pq_xfer_en = 1;
+        u_desc_ctrl.field.p_xfer_dis = !!(flags & DMA_PREP_PQ_DISABLE_P);
+        u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+        hw_desc->desc_ctrl = u_desc_ctrl.value;
+}
+
+static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
+{
+        struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+        union {
+                u32 value;
+                struct iop13xx_adma_desc_ctrl field;
+        } u_desc_ctrl;
+
+        u_desc_ctrl.value = hw_desc->desc_ctrl;
+        return u_desc_ctrl.field.pq_xfer_en;
+}
+
+static inline void
+iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+                          unsigned long flags)
+{
+        struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+        union {
+                u32 value;
+                struct iop13xx_adma_desc_ctrl field;
+        } u_desc_ctrl;
+
+        u_desc_ctrl.value = 0;
+        u_desc_ctrl.field.src_select = src_cnt - 1;
+        u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
+        u_desc_ctrl.field.zero_result = 1;
+        u_desc_ctrl.field.status_write_back_en = 1;
+        u_desc_ctrl.field.pq_xfer_en = 1;
+        u_desc_ctrl.field.p_xfer_dis = !!(flags & DMA_PREP_PQ_DISABLE_P);
+        u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+        hw_desc->desc_ctrl = u_desc_ctrl.value;
+}
+
 static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
                                         struct iop_adma_chan *chan,
                                         u32 byte_count)
@@ -351,6 +415,7 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
         }
 }
 
+#define iop_desc_set_pq_zero_sum_byte_count iop_desc_set_zero_sum_byte_count
 
 static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
                                         struct iop_adma_chan *chan,
@@ -361,6 +426,16 @@ static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
         hw_desc->upper_dest_addr = 0;
 }
 
+static inline void
+iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
+{
+        struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+
+        hw_desc->dest_addr = addr[0];
+        hw_desc->q_dest_addr = addr[1];
+        hw_desc->upper_dest_addr = 0;
+}
+
 static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
                                         dma_addr_t addr)
 {
@@ -388,6 +463,29 @@ static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
         } while (slot_cnt);
 }
 
+static inline void
+iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
+                         dma_addr_t addr, unsigned char coef)
+{
+        int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+        struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter;
+        struct iop13xx_adma_src *src;
+        int i = 0;
+
+        do {
+                iter = iop_hw_desc_slot_idx(hw_desc, i);
+                src = &iter->src[src_idx];
+                src->src_addr = addr;
+                src->pq_upper_src_addr = 0;
+                src->pq_dmlt = coef;
+                slot_cnt -= slots_per_op;
+                if (slot_cnt) {
+                        i += slots_per_op;
+                        addr += IOP_ADMA_PQ_MAX_BYTE_COUNT;
+                }
+        } while (slot_cnt);
+}
+
 static inline void
 iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
         struct iop_adma_chan *chan)
@@ -399,6 +497,15 @@ iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
 }
 
 #define iop_desc_set_zero_sum_src_addr iop_desc_set_xor_src_addr
+#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
+
+static inline void
+iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
+                              dma_addr_t *src)
+{
+        iop_desc_set_xor_src_addr(desc, pq_idx, src[pq_idx]);
+        iop_desc_set_xor_src_addr(desc, pq_idx+1, src[pq_idx+1]);
+}
 
 static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
                                         u32 next_desc_addr)
@@ -428,18 +535,20 @@ static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
         hw_desc->block_fill_data = val;
 }
 
-static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
+static inline enum sum_check_flags
+iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
 {
         struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
         struct iop13xx_adma_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
         struct iop13xx_adma_byte_count byte_count = hw_desc->byte_count_field;
+        enum sum_check_flags flags;
 
         BUG_ON(!(byte_count.tx_complete && desc_ctrl.zero_result));
 
-        if (desc_ctrl.pq_xfer_en)
-                return byte_count.zero_result_err_q;
-        else
-                return byte_count.zero_result_err;
+        flags = byte_count.zero_result_err_q << SUM_CHECK_Q;
+        flags |= byte_count.zero_result_err << SUM_CHECK_P;
+
+        return flags;
 }
 
 static inline void iop_chan_append(struct iop_adma_chan *chan)
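
A recurring pattern in the additions above is building a hardware control word through a union of a u32 and a bit-field struct, then storing the single raw value into the descriptor, which avoids hand-written shifting and masking. A minimal, generic sketch of the idiom; the field names and widths here are invented for illustration and are not the iop13xx layout:

        /* sketch only: the real layout lives in the iop13xx headers */
        static u32 build_desc_ctrl(int src_cnt, unsigned long flags)
        {
                union {
                        u32 value;
                        struct {
                                unsigned int src_select:8;  /* example fields */
                                unsigned int int_en:1;
                                unsigned int reserved:23;
                        } field;
                } u;

                u.value = 0;                    /* start from a known state */
                u.field.src_select = src_cnt - 1;
                u.field.int_en = !!(flags & DMA_PREP_INTERRUPT);
                return u.value;                 /* stored in hw_desc->desc_ctrl */
        }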

File 5: +5 −12
@@ -477,10 +477,8 @@ void __init iop13xx_platform_init(void)
 			plat_data = &iop13xx_adma_0_data;
 			dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 			dma_cap_set(DMA_XOR, plat_data->cap_mask);
-			dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
-			dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+			dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
 			dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
-			dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
 			dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
 			break;
 		case IOP13XX_INIT_ADMA_1:
@@ -489,10 +487,8 @@ void __init iop13xx_platform_init(void)
 			plat_data = &iop13xx_adma_1_data;
 			dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 			dma_cap_set(DMA_XOR, plat_data->cap_mask);
-			dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
-			dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+			dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
 			dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
-			dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
 			dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
 			break;
 		case IOP13XX_INIT_ADMA_2:
@@ -501,14 +497,11 @@ void __init iop13xx_platform_init(void)
 			plat_data = &iop13xx_adma_2_data;
 			dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 			dma_cap_set(DMA_XOR, plat_data->cap_mask);
-			dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
-			dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+			dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
 			dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
-			dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
 			dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
-			dma_cap_set(DMA_PQ_XOR, plat_data->cap_mask);
-			dma_cap_set(DMA_PQ_UPDATE, plat_data->cap_mask);
-			dma_cap_set(DMA_PQ_ZERO_SUM, plat_data->cap_mask);
+			dma_cap_set(DMA_PQ, plat_data->cap_mask);
+			dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask);
 			break;
 		}
 	}