Commit 47fcae0d authored by Christoph Hellwig

sh: introduce a sh_cacheop_vaddr helper



And use it in the maple bus code to avoid a DMA API dependency.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Yoshinori Sato <ysato@users.sourceforge.jp>
parent b2fcb677
arch/sh/include/asm/cacheflush.h +7 −0
@@ -101,5 +101,12 @@ void kunmap_coherent(void *kvaddr);
 
 void cpu_cache_init(void);
 
+static inline void *sh_cacheop_vaddr(void *vaddr)
+{
+	if (__in_29bit_mode())
+		vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
+	return vaddr;
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
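The helper exists so that code outside the DMA implementation can do cache maintenance on a buffer without open-coding the 29-bit-mode check: in 29-bit mode the address is first folded into its cacheable alias via CAC_ADDR(), otherwise it is passed through unchanged. A minimal caller sketch, not part of this commit (the function, buffer, and length names are made up), pairing the helper with one of the sh cache primitives the way the maple driver below does:

#include <asm/cacheflush.h>

/*
 * Hypothetical example: invalidate a buffer that a device has just
 * written (the DMA_FROM_DEVICE case).  sh_cacheop_vaddr() yields an
 * address the cache operation can act on; __flush_invalidate_region()
 * then discards any stale cache lines covering the buffer.
 */
static void example_invalidate_recv(void *recvbuf, size_t len)
{
	__flush_invalidate_region(sh_cacheop_vaddr(recvbuf), len);
}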
arch/sh/mm/consistent.c +1 −5
@@ -74,10 +74,7 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
 void sh_sync_dma_for_device(void *vaddr, size_t size,
 		    enum dma_data_direction direction)
 {
-	void *addr;
-
-	addr = __in_29bit_mode() ?
-	       (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;
+	void *addr = sh_cacheop_vaddr(vaddr);
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
@@ -93,7 +90,6 @@ void sh_sync_dma_for_device(void *vaddr, size_t size,
 		BUG();
 	}
 }
-EXPORT_SYMBOL(sh_sync_dma_for_device);
 
 static int __init memchunk_setup(char *str)
 {
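With both hunks applied, sh_sync_dma_for_device() reduces to resolving the cache-operable address once and then dispatching on the direction. The DMA_TO_DEVICE and DMA_BIDIRECTIONAL arms are not visible in the diff context above; the sketch below reconstructs them from the standard sh cache primitives (writeback only, and writeback plus invalidate), so treat those two cases as an assumption rather than a quote of the file:

void sh_sync_dma_for_device(void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	void *addr = sh_cacheop_vaddr(vaddr);

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only (assumed) */
		__flush_wback_region(addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback + invalidate (assumed) */
		__flush_purge_region(addr, size);
		break;
	default:
		BUG();
	}
}

Dropping the EXPORT_SYMBOL() in the second hunk fits the stated goal: the maple bus driver was the modular user of this function, and after this commit it calls the cache primitives directly instead.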
drivers/sh/maple/maple.c +4 −3
@@ -300,8 +300,8 @@ static void maple_send(void)
 	mutex_unlock(&maple_wlist_lock);
 	if (maple_packets > 0) {
 		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
-			sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE,
-				       PAGE_SIZE, DMA_BIDIRECTIONAL);
+			__flush_purge_region(maple_sendbuf + i * PAGE_SIZE,
+					PAGE_SIZE);
 	}
 
 finish:
@@ -642,7 +642,8 @@ static void maple_dma_handler(struct work_struct *work)
 		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
 			mdev = mq->dev;
 			recvbuf = mq->recvbuf->buf;
-			sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE);
+			__flush_invalidate_region(sh_cacheop_vaddr(recvbuf),
+					0x400);
 			code = recvbuf[0];
 			kfree(mq->sendbuf);
 			list_del_init(&mq->list);
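The substitutions keep the original cache semantics: __flush_purge_region() (writeback plus invalidate) stands in for the DMA_BIDIRECTIONAL sync on the send buffer, and __flush_invalidate_region() (invalidate only) stands in for the DMA_FROM_DEVICE sync on the receive buffer. Note that only the receive buffer is passed through sh_cacheop_vaddr(); maple_sendbuf is presumably already a cache-operable address, so no translation is needed there.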