
Commit a2bd1140 authored by Dave Jiang, committed by Dan Williams

netdma: adding alignment check for NETDMA ops



This is the fallout from adding a memcpy alignment workaround for certain
IOATDMA hardware. NetDMA will now only use a DMA engine that can handle
byte-aligned operations.

Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent f26df1a1
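
The check this commit introduces boils down to one question: does a channel's device accept a fully byte-aligned source offset, destination offset, and transfer length? In the dmaengine API a device advertises its memcpy alignment requirement as a power-of-two shift in dma_device->copy_align, and is_dma_copy_aligned(dev, 1, 1, 1) passes only when that requirement is a single byte. A minimal, kernel-independent sketch of the test (the helper name check_copy_aligned is illustrative, not a kernel symbol):

#include <stdbool.h>
#include <stddef.h>

/*
 * Sketch of the dmaengine alignment test: copy_align_shift is the
 * device's advertised requirement as a power-of-two shift (0 means
 * the engine has no alignment restriction at all).
 */
static bool check_copy_aligned(unsigned char copy_align_shift,
			       size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!copy_align_shift)
		return true;

	mask = ((size_t)1 << copy_align_shift) - 1;
	/* both offsets and the length must be multiples of the alignment */
	return !(mask & (off1 | off2 | len));
}

/*
 * check_copy_aligned(0, 1, 1, 1) -> true:  byte-capable engine, NetDMA uses it
 * check_copy_aligned(6, 1, 1, 1) -> false: 64-byte engine, NetDMA skips it
 */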
drivers/dma/dmaengine.c  +14 −0
@@ -332,6 +332,20 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 }
 EXPORT_SYMBOL(dma_find_channel);
 
+/*
+ * net_dma_find_channel - find a channel for net_dma
+ * net_dma has alignment requirements
+ */
+struct dma_chan *net_dma_find_channel(void)
+{
+	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
+	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
+		return NULL;
+
+	return chan;
+}
+EXPORT_SYMBOL(net_dma_find_channel);
+
 /**
  * dma_issue_pending_all - flush all pending operations across all channels
  */
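
The driver side of this contract is the copy_align field itself: hardware whose memcpy path cannot handle arbitrary alignment, such as the IOATDMA parts behind the workaround this commit reacts to, advertises its requirement before registering the device. A hypothetical driver-side sketch (my_dma_setup and the value 6 are illustrative, not taken from this commit):

#include <linux/dmaengine.h>

/* Hypothetical: declare that memcpy ops need 64-byte alignment
 * (copy_align is a power-of-two shift, so 6 means 1 << 6 = 64). */
static void my_dma_setup(struct dma_device *dma_dev)
{
	dma_dev->copy_align = 6;
}

With copy_align set like this, is_dma_copy_aligned(dev, 1, 1, 1) fails, so net_dma_find_channel() above returns NULL and the TCP receive paths changed below fall back to plain CPU copies, while other memcpy offload clients can still take the channel through dma_find_channel().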
include/linux/dmaengine.h  +1 −0
@@ -948,6 +948,7 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 
 /* --- Helper iov-locking functions --- */
net/ipv4/tcp.c  +2 −2
@@ -1450,7 +1450,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		if ((available < target) &&
 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 		    !sysctl_tcp_low_latency &&
-		    dma_find_channel(DMA_MEMCPY)) {
+		    net_dma_find_channel()) {
 			preempt_enable_no_resched();
 			tp->ucopy.pinned_list =
 					dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1665,7 +1665,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+				tp->ucopy.dma_chan = net_dma_find_channel();
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
net/ipv4/tcp_input.c  +1 −1
@@ -5190,7 +5190,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+		tp->ucopy.dma_chan = net_dma_find_channel();
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
net/ipv4/tcp_ipv4.c  +1 −1
@@ -1727,7 +1727,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+			tp->ucopy.dma_chan = net_dma_find_channel();
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else