Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 65f5e357 authored by Xin Long's avatar Xin Long Committed by David S. Miller
Browse files

sctp: implement abort_pd for sctp_stream_interleave



abort_pd is added as a member of sctp_stream_interleave, used to abort
partial delivery for data or idata, called in sctp_cmd_assoc_failed.

Since stream interleave allows partial delivery to be in progress on each
stream at the same time, sctp_intl_abort_pd for idata is necessarily very
different from the old function sctp_ulpq_abort_pd for data.

Note that sctp_ulpevent_make_pdapi will support per stream in this
patch by adding pdapi_stream and pdapi_seq in sctp_pdapi_event, as
described in section 6.1.7 of RFC6458.

Signed-off-by: Xin Long <lucien.xin@gmail.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent be4e0ce1
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -46,6 +46,7 @@ struct sctp_stream_interleave {
	void	(*renege_events)(struct sctp_ulpq *ulpq,
				 struct sctp_chunk *chunk, gfp_t gfp);
	void	(*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
	void	(*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
};

void sctp_stream_interleave_init(struct sctp_stream *stream);
+2 −1
Original line number Diff line number Diff line
@@ -122,7 +122,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(

struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
	const struct sctp_association *asoc,
	__u32 indication, gfp_t gfp);
	__u32 indication, __u32 sid, __u32 seq,
	__u32 flags, gfp_t gfp);

struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
	const struct sctp_association *asoc, gfp_t gfp);
+2 −0
Original line number Diff line number Diff line
@@ -460,6 +460,8 @@ struct sctp_pdapi_event {
	__u32 pdapi_length;
	__u32 pdapi_indication;
	sctp_assoc_t pdapi_assoc_id;
	__u32 pdapi_stream;
	__u32 pdapi_seq;
};

enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, };
+1 −1
Original line number Diff line number Diff line
@@ -632,7 +632,7 @@ static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
	struct sctp_chunk *abort;

	/* Cancel any partial delivery in progress. */
	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
	asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);

	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
+99 −0
Original line number Diff line number Diff line
@@ -652,6 +652,103 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
	sk_mem_reclaim(asoc->base.sk);
}

/* Deliver an SCTP_PARTIAL_DELIVERY_ABORTED notification for stream @sid
 * (message id @mid) to the user, if the socket has subscribed to
 * SCTP_PARTIAL_DELIVERY_EVENT.  The event skb is queued directly on the
 * socket receive queue and the reader is woken at most once.
 */
static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
				      __u32 mid, __u16 flags, gfp_t gfp)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;

	/* Nothing to do unless the user asked for PD events. */
	if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
					&sp->subscribe))
		return;

	event = sctp_ulpevent_make_pdapi(ulpq->asoc,
					 SCTP_PARTIAL_DELIVERY_ABORTED,
					 sid, mid, flags, gfp);
	if (!event)
		return;

	__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(event));

	/* Avoid redundant wakeups while data_ready is already pending. */
	if (!sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
}

/* After an aborted partial delivery advanced the expected MID for stream
 * @sid, pull any ordered events for that stream out of the lobby that are
 * now (or were already) deliverable, and hand them up to the ULP.
 */
static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	struct sctp_ulpevent *cevent, *event = NULL;
	struct sk_buff_head *lobby = &ulpq->lobby;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;
	__u16 csid;
	__u32 cmid;

	skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		/* Lobby is kept sorted by stream id: past @sid means done. */
		if (csid > sid)
			break;

		if (csid < sid)
			continue;

		/* Stop at the first event at/after the expected MID; only
		 * events with MID below the expected one are reaped here.
		 */
		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			event = sctp_skb2event(pos);

		__skb_queue_tail(&temp, pos);
	}

	/* If nothing was reaped above and the loop stopped on a real entry
	 * (not the list head), check whether that entry is exactly the next
	 * expected message for @sid; if so consume it and advance the MID.
	 */
	if (!event && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
			sctp_mid_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	if (event) {
		/* Chain any further in-order events behind the first one,
		 * then enqueue the whole run to the socket.
		 */
		sctp_intl_retrieve_ordered(ulpq, event);
		sctp_enqueue_event(ulpq, event);
	}
}

/* Abort partial delivery on every inbound stream of the association.
 * For each stream still in PD mode: notify the user, skip past the
 * aborted message id, and reap any ordered events that became
 * deliverable.  Finally flush all pending ulp queues.
 */
static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	__u16 sid;

	for (sid = 0; sid < stream->incnt; sid++) {
		struct sctp_stream_in *sin = &stream->in[sid];
		__u32 mid;

		if (!sin->pd_mode)
			continue;

		sin->pd_mode = 0;
		mid = sin->mid;

		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
		sctp_mid_skip(stream, in, sid, mid);
		sctp_intl_reap_ordered(ulpq, sid);
	}

	/* intl abort pd happens only when all data needs to be cleaned */
	sctp_ulpq_flush(ulpq);
}

static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len		= sizeof(struct sctp_data_chunk),
	/* DATA process functions */
@@ -662,6 +759,7 @@ static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.enqueue_event		= sctp_ulpq_tail_event,
	.renege_events		= sctp_ulpq_renege,
	.start_pd		= sctp_ulpq_partial_delivery,
	.abort_pd		= sctp_ulpq_abort_pd,
};

static struct sctp_stream_interleave sctp_stream_interleave_1 = {
@@ -674,6 +772,7 @@ static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.enqueue_event		= sctp_enqueue_event,
	.renege_events		= sctp_renege_events,
	.start_pd		= sctp_intl_start_pd,
	.abort_pd		= sctp_intl_abort_pd,
};

void sctp_stream_interleave_init(struct sctp_stream *stream)
Loading