Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 10b0a773 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'for-davem' of git://gitorious.org/linux-can/linux-can-next

parents d3bd3a5e 5d91efa8
Loading
Loading
Loading
Loading
+33 −57
Original line number Diff line number Diff line
@@ -444,11 +444,14 @@ static int cgw_notifier(struct notifier_block *nb,
	return NOTIFY_DONE;
}

static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
		       u32 pid, u32 seq, int flags)
{
	struct cgw_frame_mod mb;
	struct rtcanmsg *rtcan;
	struct nlmsghdr *nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*rtcan), 0);
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
	if (!nlh)
		return -EMSGSIZE;

@@ -462,15 +465,11 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
	if (gwj->handled_frames) {
		if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
	}

	if (gwj->dropped_frames) {
		if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
	}

	/* check non default settings of attributes */
@@ -480,8 +479,6 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
		mb.modtype = gwj->mod.modtype.and;
		if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
	}

	if (gwj->mod.modtype.or) {
@@ -489,8 +486,6 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
		mb.modtype = gwj->mod.modtype.or;
		if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
	}

	if (gwj->mod.modtype.xor) {
@@ -498,8 +493,6 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
		mb.modtype = gwj->mod.modtype.xor;
		if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
	}

	if (gwj->mod.modtype.set) {
@@ -507,26 +500,18 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
		mb.modtype = gwj->mod.modtype.set;
		if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
	}

	if (gwj->mod.csumfunc.crc8) {
		if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
			    &gwj->mod.csum.crc8) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + \
				NLA_ALIGN(CGW_CS_CRC8_LEN);
	}

	if (gwj->mod.csumfunc.xor) {
		if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
			    &gwj->mod.csum.xor) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + \
				NLA_ALIGN(CGW_CS_XOR_LEN);
	}

	if (gwj->gwtype == CGW_TYPE_CAN_CAN) {
@@ -535,23 +520,16 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
			if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
				    &gwj->ccgw.filter) < 0)
				goto cancel;
			else
				nlh->nlmsg_len += NLA_HDRLEN +
					NLA_ALIGN(sizeof(struct can_filter));
		}

		if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));

		if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
	}

	return skb->len;
	return nlmsg_end(skb, nlh);

cancel:
	nlmsg_cancel(skb, nlh);
@@ -571,7 +549,8 @@ static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
		if (idx < s_idx)
			goto cont;

		if (cgw_put_job(skb, gwj) < 0)
		if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).pid,
		    cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
			break;
cont:
		idx++;
@@ -583,6 +562,18 @@ static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
	return skb->len;
}

/* Netlink attribute policy for the CGW_* attributes, handed to
 * nlmsg_parse() in cgw_parse_attr(). For the blob attributes the
 * .len field gives the required minimum payload length (standard
 * nla_policy semantics for untyped attributes), replacing the
 * open-coded nla_len() checks this diff removes; CGW_SRC_IF and
 * CGW_DST_IF are validated as plain u32 interface indices. */
static const struct nla_policy cgw_policy[CGW_MAX+1] = {
	[CGW_MOD_AND]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_OR]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_XOR]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_SET]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_CS_XOR]	= { .len = sizeof(struct cgw_csum_xor) },
	[CGW_CS_CRC8]	= { .len = sizeof(struct cgw_csum_crc8) },
	[CGW_SRC_IF]	= { .type = NLA_U32 },
	[CGW_DST_IF]	= { .type = NLA_U32 },
	[CGW_FILTER]	= { .len = sizeof(struct can_filter) },
};

/* check for common and gwtype specific attributes */
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
			  u8 gwtype, void *gwtypeattr)
@@ -595,14 +586,14 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
	/* initialize modification & checksum data space */
	memset(mod, 0, sizeof(*mod));

	err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, NULL);
	err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX,
			  cgw_policy);
	if (err < 0)
		return err;

	/* check for AND/OR/XOR/SET modifications */

	if (tb[CGW_MOD_AND] &&
	    nla_len(tb[CGW_MOD_AND]) == CGW_MODATTR_LEN) {
	if (tb[CGW_MOD_AND]) {
		nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);

		canframecpy(&mod->modframe.and, &mb.cf);
@@ -618,8 +609,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
			mod->modfunc[modidx++] = mod_and_data;
	}

	if (tb[CGW_MOD_OR] &&
	    nla_len(tb[CGW_MOD_OR]) == CGW_MODATTR_LEN) {
	if (tb[CGW_MOD_OR]) {
		nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);

		canframecpy(&mod->modframe.or, &mb.cf);
@@ -635,8 +625,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
			mod->modfunc[modidx++] = mod_or_data;
	}

	if (tb[CGW_MOD_XOR] &&
	    nla_len(tb[CGW_MOD_XOR]) == CGW_MODATTR_LEN) {
	if (tb[CGW_MOD_XOR]) {
		nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);

		canframecpy(&mod->modframe.xor, &mb.cf);
@@ -652,8 +641,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
			mod->modfunc[modidx++] = mod_xor_data;
	}

	if (tb[CGW_MOD_SET] &&
	    nla_len(tb[CGW_MOD_SET]) == CGW_MODATTR_LEN) {
	if (tb[CGW_MOD_SET]) {
		nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);

		canframecpy(&mod->modframe.set, &mb.cf);
@@ -672,11 +660,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
	/* check for checksum operations after CAN frame modifications */
	if (modidx) {

		if (tb[CGW_CS_CRC8] &&
		    nla_len(tb[CGW_CS_CRC8]) == CGW_CS_CRC8_LEN) {

			struct cgw_csum_crc8 *c = (struct cgw_csum_crc8 *)\
				nla_data(tb[CGW_CS_CRC8]);
		if (tb[CGW_CS_CRC8]) {
			struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx);
@@ -699,11 +684,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
				mod->csumfunc.crc8 = cgw_csum_crc8_neg;
		}

		if (tb[CGW_CS_XOR] &&
		    nla_len(tb[CGW_CS_XOR]) == CGW_CS_XOR_LEN) {

			struct cgw_csum_xor *c = (struct cgw_csum_xor *)\
				nla_data(tb[CGW_CS_XOR]);
		if (tb[CGW_CS_XOR]) {
			struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx);
@@ -735,8 +717,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
		memset(ccgw, 0, sizeof(*ccgw));

		/* check for can_filter in attributes */
		if (tb[CGW_FILTER] &&
		    nla_len(tb[CGW_FILTER]) == sizeof(struct can_filter))
		if (tb[CGW_FILTER])
			nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
				   sizeof(struct can_filter));

@@ -746,13 +727,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
		if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
			return err;

		if (nla_len(tb[CGW_SRC_IF]) == sizeof(u32))
			nla_memcpy(&ccgw->src_idx, tb[CGW_SRC_IF],
				   sizeof(u32));

		if (nla_len(tb[CGW_DST_IF]) == sizeof(u32))
			nla_memcpy(&ccgw->dst_idx, tb[CGW_DST_IF],
				   sizeof(u32));
		ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]);
		ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]);

		/* both indices set to 0 for flushing all routing entries */
		if (!ccgw->src_idx && !ccgw->dst_idx)