Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ca6a09f2 authored by Sjur Braendeland, committed by David S. Miller
Browse files

caif: Bugfix - Poll can't return POLLHUP while connecting.



Discovered a bug when testing async connect: while a connection is still
being established, poll should not return POLLHUP; it should return
POLLOUT once the socket is connected.
Also fixed the sysfs flow-control counters.

Signed-off-by: Sjur Braendeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 638e628a
Loading
Loading
Loading
Loading
+6 −15
Original line number Original line Diff line number Diff line
@@ -138,7 +138,7 @@ void caif_flow_ctrl(struct sock *sk, int mode)
{
{
	struct caifsock *cf_sk;
	struct caifsock *cf_sk;
	cf_sk = container_of(sk, struct caifsock, sk);
	cf_sk = container_of(sk, struct caifsock, sk);
	if (cf_sk->layer.dn)
	if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
		cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
		cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
}
}


@@ -162,9 +162,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
			atomic_read(&cf_sk->sk.sk_rmem_alloc),
			atomic_read(&cf_sk->sk.sk_rmem_alloc),
			sk_rcvbuf_lowwater(cf_sk));
			sk_rcvbuf_lowwater(cf_sk));
		set_rx_flow_off(cf_sk);
		set_rx_flow_off(cf_sk);
		if (cf_sk->layer.dn)
		dbfs_atomic_inc(&cnt.num_rx_flow_off);
			cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
						CAIF_MODEMCMD_FLOW_OFF_REQ);
	}
	}


	err = sk_filter(sk, skb);
	err = sk_filter(sk, skb);
@@ -175,9 +174,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
		trace_printk("CAIF: %s():"
		trace_printk("CAIF: %s():"
			" sending flow OFF due to rmem_schedule\n",
			" sending flow OFF due to rmem_schedule\n",
			__func__);
			__func__);
		if (cf_sk->layer.dn)
		dbfs_atomic_inc(&cnt.num_rx_flow_off);
			cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
						CAIF_MODEMCMD_FLOW_OFF_REQ);
	}
	}
	skb->dev = NULL;
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	skb_set_owner_r(skb, sk);
@@ -285,16 +283,13 @@ static void caif_check_flow_release(struct sock *sk)
{
{
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);


	if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL)
		return;
	if (rx_flow_is_on(cf_sk))
	if (rx_flow_is_on(cf_sk))
		return;
		return;


	if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
	if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
			dbfs_atomic_inc(&cnt.num_rx_flow_on);
			dbfs_atomic_inc(&cnt.num_rx_flow_on);
			set_rx_flow_on(cf_sk);
			set_rx_flow_on(cf_sk);
			cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
			caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
						CAIF_MODEMCMD_FLOW_ON_REQ);
	}
	}
}
}
/*
/*
@@ -1018,10 +1013,6 @@ static unsigned int caif_poll(struct file *file,
		(sk->sk_shutdown & RCV_SHUTDOWN))
		(sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;
		mask |= POLLIN | POLLRDNORM;


	/* Connection-based need to check for termination and startup */
	if (sk->sk_state == CAIF_DISCONNECTED)
		mask |= POLLHUP;

	/*
	/*
	 * we set writable also when the other side has shut down the
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 * connection. This prevents stuck sockets.