Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5756d346 authored by Eric Dumazet, committed by Patrick McHardy
Browse files

netfilter: ip_queue: rwlock to spinlock conversion



Converts queue_lock rwlock to a spinlock.

(readlocked part can be changed by reads of integer values)

One atomic operation instead of four per ipq_enqueue_packet() call.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
parent 2bf07482
Loading
Loading
Loading
Loading
+25 −32
Original line number Original line Diff line number Diff line
@@ -42,7 +42,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);


static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
static DEFINE_RWLOCK(queue_lock);
static DEFINE_SPINLOCK(queue_lock);
static int peer_pid __read_mostly;
static int peer_pid __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int queue_total;
static unsigned int queue_total;
@@ -72,10 +72,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range)
		break;
		break;


	case IPQ_COPY_PACKET:
	case IPQ_COPY_PACKET:
		copy_mode = mode;
		if (range > 0xFFFF)
			range = 0xFFFF;
		copy_range = range;
		copy_range = range;
		if (copy_range > 0xFFFF)
		copy_mode = mode;
			copy_range = 0xFFFF;
		break;
		break;


	default:
	default:
@@ -101,7 +101,7 @@ ipq_find_dequeue_entry(unsigned long id)
{
{
	struct nf_queue_entry *entry = NULL, *i;
	struct nf_queue_entry *entry = NULL, *i;


	write_lock_bh(&queue_lock);
	spin_lock_bh(&queue_lock);


	list_for_each_entry(i, &queue_list, list) {
	list_for_each_entry(i, &queue_list, list) {
		if ((unsigned long)i == id) {
		if ((unsigned long)i == id) {
@@ -115,7 +115,7 @@ ipq_find_dequeue_entry(unsigned long id)
		queue_total--;
		queue_total--;
	}
	}


	write_unlock_bh(&queue_lock);
	spin_unlock_bh(&queue_lock);
	return entry;
	return entry;
}
}


@@ -136,9 +136,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
static void
static void
ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
{
	write_lock_bh(&queue_lock);
	spin_lock_bh(&queue_lock);
	__ipq_flush(cmpfn, data);
	__ipq_flush(cmpfn, data);
	write_unlock_bh(&queue_lock);
	spin_unlock_bh(&queue_lock);
}
}


static struct sk_buff *
static struct sk_buff *
@@ -152,9 +152,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
	struct nlmsghdr *nlh;
	struct nlmsghdr *nlh;
	struct timeval tv;
	struct timeval tv;


	read_lock_bh(&queue_lock);
	switch (ACCESS_ONCE(copy_mode)) {

	switch (copy_mode) {
	case IPQ_COPY_META:
	case IPQ_COPY_META:
	case IPQ_COPY_NONE:
	case IPQ_COPY_NONE:
		size = NLMSG_SPACE(sizeof(*pmsg));
		size = NLMSG_SPACE(sizeof(*pmsg));
@@ -162,26 +160,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)


	case IPQ_COPY_PACKET:
	case IPQ_COPY_PACKET:
		if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
		if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
		    (*errp = skb_checksum_help(entry->skb))) {
		    (*errp = skb_checksum_help(entry->skb)))
			read_unlock_bh(&queue_lock);
			return NULL;
			return NULL;
		}

		if (copy_range == 0 || copy_range > entry->skb->len)
		data_len = ACCESS_ONCE(copy_range);
		if (data_len == 0 || data_len > entry->skb->len)
			data_len = entry->skb->len;
			data_len = entry->skb->len;
		else
			data_len = copy_range;


		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
		break;
		break;


	default:
	default:
		*errp = -EINVAL;
		*errp = -EINVAL;
		read_unlock_bh(&queue_lock);
		return NULL;
		return NULL;
	}
	}


	read_unlock_bh(&queue_lock);

	skb = alloc_skb(size, GFP_ATOMIC);
	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
	if (!skb)
		goto nlmsg_failure;
		goto nlmsg_failure;
@@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
	if (nskb == NULL)
	if (nskb == NULL)
		return status;
		return status;


	write_lock_bh(&queue_lock);
	spin_lock_bh(&queue_lock);


	if (!peer_pid)
	if (!peer_pid)
		goto err_out_free_nskb;
		goto err_out_free_nskb;
@@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)


	__ipq_enqueue_entry(entry);
	__ipq_enqueue_entry(entry);


	write_unlock_bh(&queue_lock);
	spin_unlock_bh(&queue_lock);
	return status;
	return status;


err_out_free_nskb:
err_out_free_nskb:
	kfree_skb(nskb);
	kfree_skb(nskb);


err_out_unlock:
err_out_unlock:
	write_unlock_bh(&queue_lock);
	spin_unlock_bh(&queue_lock);
	return status;
	return status;
}
}


@@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range)
{
{
	int status;
	int status;


	write_lock_bh(&queue_lock);
	spin_lock_bh(&queue_lock);
	status = __ipq_set_mode(mode, range);
	status = __ipq_set_mode(mode, range);
	write_unlock_bh(&queue_lock);
	spin_unlock_bh(&queue_lock);
	return status;
	return status;
}
}


@@ -440,11 +433,11 @@ __ipq_rcv_skb(struct sk_buff *skb)
	if (security_netlink_recv(skb, CAP_NET_ADMIN))
	if (security_netlink_recv(skb, CAP_NET_ADMIN))
		RCV_SKB_FAIL(-EPERM);
		RCV_SKB_FAIL(-EPERM);


	write_lock_bh(&queue_lock);
	spin_lock_bh(&queue_lock);


	if (peer_pid) {
	if (peer_pid) {
		if (peer_pid != pid) {
		if (peer_pid != pid) {
			write_unlock_bh(&queue_lock);
			spin_unlock_bh(&queue_lock);
			RCV_SKB_FAIL(-EBUSY);
			RCV_SKB_FAIL(-EBUSY);
		}
		}
	} else {
	} else {
@@ -452,7 +445,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
		peer_pid = pid;
		peer_pid = pid;
	}
	}


	write_unlock_bh(&queue_lock);
	spin_unlock_bh(&queue_lock);


	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
				  nlmsglen - NLMSG_LENGTH(0));
				  nlmsglen - NLMSG_LENGTH(0));
@@ -497,10 +490,10 @@ ipq_rcv_nl_event(struct notifier_block *this,
	struct netlink_notify *n = ptr;
	struct netlink_notify *n = ptr;


	if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
	if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
		write_lock_bh(&queue_lock);
		spin_lock_bh(&queue_lock);
		if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
		if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
			__ipq_reset();
			__ipq_reset();
		write_unlock_bh(&queue_lock);
		spin_unlock_bh(&queue_lock);
	}
	}
	return NOTIFY_DONE;
	return NOTIFY_DONE;
}
}
@@ -527,7 +520,7 @@ static ctl_table ipq_table[] = {
#ifdef CONFIG_PROC_FS
#ifdef CONFIG_PROC_FS
static int ip_queue_show(struct seq_file *m, void *v)
static int ip_queue_show(struct seq_file *m, void *v)
{
{
	read_lock_bh(&queue_lock);
	spin_lock_bh(&queue_lock);


	seq_printf(m,
	seq_printf(m,
		      "Peer PID          : %d\n"
		      "Peer PID          : %d\n"
@@ -545,7 +538,7 @@ static int ip_queue_show(struct seq_file *m, void *v)
		      queue_dropped,
		      queue_dropped,
		      queue_user_dropped);
		      queue_user_dropped);


	read_unlock_bh(&queue_lock);
	spin_unlock_bh(&queue_lock);
	return 0;
	return 0;
}
}