
Commit c4dcf0e2 authored by Brice Goglin, committed by Dan Williams

ioatdma: disable RAID on non-Atom platforms and reenable unaligned copies



Disable RAID on non-Atom platforms and remove related fixups such as the
64-byte alignment restriction on legacy DMA operations (introduced in
commit f26df1a1 as a workaround for silicon errata).

Signed-off-by: Brice Goglin <Brice.Goglin@inria.fr>
Acked-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Jon Mason <jon.mason@intel.com>
Signed-off-by: Dan Williams <djbw@fb.com>
parent e03bc654
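
For context before the diff below: the net effect of the patch is that the XOR/PQ/RAID16SS capability bits are now cleared up front for both CB3.2 Xeon and non-RAID Atom (BWD) parts, replacing the scattered per-operation alignment fixups. The following is a minimal, self-contained C sketch of that capability-masking pattern, not the driver source; the bit values and the hard-coded results of the platform checks are illustrative assumptions.

/*
 * Standalone sketch of the masking done in ioat3_dma_probe() after this
 * patch. Bit positions and the stub platform checks are assumptions for
 * illustration only; the real driver reads the capability register and
 * checks PCI device IDs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IOAT_CAP_XOR      0x02u  /* illustrative bit values */
#define IOAT_CAP_PQ       0x04u
#define IOAT_CAP_RAID16SS 0x80u

/* stand-ins for the driver's PCI-ID based platform checks */
static bool is_xeon_cb32(void)  { return true;  }
static bool is_bwd_noraid(void) { return false; }

int main(void)
{
	/* pretend the hardware advertised all RAID capabilities */
	uint32_t cap = IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS;

	/* after the patch: one combined check strips RAID ops entirely */
	if (is_xeon_cb32() || is_bwd_noraid())
		cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

	printf("raid ops advertised: %s\n",
	       (cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)) ? "yes" : "no");
	return 0;
}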
+1 −23
@@ -1775,15 +1775,12 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
 
-	if (is_xeon_cb32(pdev))
-		dma->copy_align = 6;
-
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
 	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
 
-	if (is_bwd_noraid(pdev))
+	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
 		device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
 
 	/* dca is incompatible with raid operations */
@@ -1793,7 +1790,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	if (device->cap & IOAT_CAP_XOR) {
 		is_raid_device = true;
 		dma->max_xor = 8;
-		dma->xor_align = 6;
 
 		dma_cap_set(DMA_XOR, dma->cap_mask);
 		dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1812,13 +1808,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 
 		if (device->cap & IOAT_CAP_RAID16SS) {
 			dma_set_maxpq(dma, 16, 0);
-			dma->pq_align = 0;
 		} else {
 			dma_set_maxpq(dma, 8, 0);
-			if (is_xeon_cb32(pdev))
-				dma->pq_align = 6;
-			else
-				dma->pq_align = 0;
 		}
 
 		if (!(device->cap & IOAT_CAP_XOR)) {
@@ -1829,13 +1820,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 
 			if (device->cap & IOAT_CAP_RAID16SS) {
 				dma->max_xor = 16;
-				dma->xor_align = 0;
 			} else {
 				dma->max_xor = 8;
-				if (is_xeon_cb32(pdev))
-					dma->xor_align = 6;
-				else
-					dma->xor_align = 0;
 			}
 		}
 	}
@@ -1844,14 +1830,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	device->cleanup_fn = ioat3_cleanup_event;
 	device->timer_fn = ioat3_timer_event;
 
-	if (is_xeon_cb32(pdev)) {
-		dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
-		dma->device_prep_dma_xor_val = NULL;
-
-		dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
-		dma->device_prep_dma_pq_val = NULL;
-	}
-
 	/* starting with CB3.3 super extended descriptors are supported */
 	if (device->cap & IOAT_CAP_RAID16SS) {
 		char pool_name[14];