Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9e903e08 authored by Eric Dumazet, committed by David S. Miller
Browse files

net: add skb frag size accessors



To ease skb->truesize sanitization, it's better to be able to localize
all references to skb frag sizes.

Define accessors : skb_frag_size() to fetch frag size, and
skb_frag_size_{set|add|sub}() to manipulate it.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent dd767856
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1136,7 +1136,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
				put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
				    skb_frag_page(&skb_shinfo(skb)->frags[i]) +
					skb_shinfo(skb)->frags[i].page_offset,
				    skb_shinfo(skb)->frags[i].size);
				    skb_frag_size(&skb_shinfo(skb)->frags[i]));
	}
	if (skb->len & 3)
		put_dma(tx->index,eni_dev->dma,&j,zeroes,4-(skb->len & 3));
+2 −2
Original line number Diff line number Diff line
@@ -800,8 +800,8 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
	/* Loop thru additional data fragments and queue them */
	if (skb_shinfo(skb)->nr_frags) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			maplen = frag->size;
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			maplen = skb_frag_size(frag);
			mapaddr = skb_frag_dma_map(&c2dev->pcidev->dev, frag,
						   0, maplen, DMA_TO_DEVICE);
			elem = elem->next;
+5 −5
Original line number Diff line number Diff line
@@ -444,10 +444,10 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
			skb_frag_t *frag =
				&skb_shinfo(skb)->frags[skb_fragment_index];
			bus_address = skb_frag_dma_map(&nesdev->pcidev->dev,
						       frag, 0, frag->size,
						       frag, 0, skb_frag_size(frag),
						       DMA_TO_DEVICE);
			wqe_fragment_length[wqe_fragment_index] =
					cpu_to_le16(skb_shinfo(skb)->frags[skb_fragment_index].size);
					cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[skb_fragment_index]));
			set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
				bus_address);
			wqe_fragment_index++;
@@ -565,7 +565,7 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
					&skb_shinfo(skb)->frags[tso_frag_count];
				tso_bus_address[tso_frag_count] =
					skb_frag_dma_map(&nesdev->pcidev->dev,
							 frag, 0, frag->size,
							 frag, 0, skb_frag_size(frag),
							 DMA_TO_DEVICE);
			}

@@ -637,11 +637,11 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
				}
				while (wqe_fragment_index < 5) {
					wqe_fragment_length[wqe_fragment_index] =
							cpu_to_le16(skb_shinfo(skb)->frags[tso_frag_index].size);
							cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index]));
					set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
						(u64)tso_bus_address[tso_frag_index]);
					wqe_fragment_index++;
					tso_wqe_length += skb_shinfo(skb)->frags[tso_frag_index++].size;
					tso_wqe_length += skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index++]);
					if (wqe_fragment_index < 5)
						wqe_fragment_length[wqe_fragment_index] = 0;
					if (tso_frag_index == tso_frag_count)
+1 −1
Original line number Diff line number Diff line
@@ -543,7 +543,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			frag->size = size;
			skb_frag_size_set(frag, size);
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
+10 −8
Original line number Diff line number Diff line
@@ -117,7 +117,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,

		size = length - IPOIB_UD_HEAD_SIZE;

		frag->size     = size;
		skb_frag_size_set(frag, size);
		skb->data_len += size;
		skb->truesize += size;
	} else
@@ -322,10 +322,10 @@ static int ipoib_dma_map_tx(struct ib_device *ca,
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + off] = ib_dma_map_page(ca,
						 skb_frag_page(frag),
						 frag->page_offset, frag->size,
						 frag->page_offset, skb_frag_size(frag),
						 DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
@@ -334,8 +334,9 @@ static int ipoib_dma_map_tx(struct ib_device *ca,

partial_error:
	for (; i > 0; --i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
		ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
	}

	if (off)
@@ -359,8 +360,9 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		ib_dma_unmap_page(ca, mapping[i + off], frag->size,
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag),
				  DMA_TO_DEVICE);
	}
}
@@ -510,7 +512,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,

	for (i = 0; i < nr_frags; ++i) {
		priv->tx_sge[i + off].addr = mapping[i + off];
		priv->tx_sge[i + off].length = frags[i].size;
		priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
	}
	priv->tx_wr.num_sge	     = nr_frags + off;
	priv->tx_wr.wr_id 	     = wr_id;
Loading