
Commit c5dfd654 authored by Linus Torvalds
Pull networking fixes from David Miller:

 1) Don't use shared bluetooth antenna in iwlwifi driver for management
    frames, from Emmanuel Grumbach.

 2) Fix device ID check in ath9k driver, from Felix Fietkau.

 3) Off by one in xen-netback BUG checks, from Dan Carpenter.

 4) Fix IFLA_VF_PORT netlink attribute validation, from Daniel Borkmann.

 5) Fix races in setting the peeked bit flag in SKBs during datagram
    receive.  If the skb is shared we have to clone it, otherwise the
    value can easily be corrupted.  Fix from Herbert Xu (a sketch of
    this clone-before-write pattern follows this list).

 6) Revert fec clock handling change, causes regressions.  From Fabio
    Estevam.

 7) Fix use after free in fq_codel and sfq packet schedulers, from WANG
    Cong.

 8) ipvlan bug fixes (memory leaks, missing rcu_dereference_bh, etc.)
    from WANG Cong and Konstantin Khlebnikov.

 9) Memory leak in act_bpf packet action, from Alexei Starovoitov.

10) ARM bpf JIT bug fixes from Nicolas Schichan.

11) Fix backwards compat of ANY_LAYOUT in virtio_net driver, from
    Michael S Tsirkin.

12) Destruction of bond with different ARP header types not handled
    correctly, fix from Nikolay Aleksandrov.

13) Revert GRO receive support in ipv6 SIT tunnel driver, causes
    regressions because the GRO packets created cannot be processed
    properly on the GSO side if we forward the frame.  From Herbert Xu.

14) TCCR update race and other fixes to ravb driver from Sergei
    Shtylyov.

15) Fix SKB leaks in caif_queue_rcv_skb(), from Eric Dumazet.

16) Fix panics on packet scheduler filter replace, from Daniel Borkmann.

17) Make sure AF_PACKET sees proper IP headers in defragmented frames
    (via the PACKET_FANOUT_FLAG_DEFRAG option), from Edward Hyunkoo Jee
    (a setsockopt example follows this list).

18) AF_NETLINK cannot hold mutex in RCU callback, fix from Florian
    Westphal.
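
For item 5 above, the shape of the fix is "clone before you write a flag on a
shared skb".  The snippet below is only a minimal sketch of that pattern, not
the actual net/core/datagram.c change, and the helper name is made up for
illustration:

	#include <linux/skbuff.h>

	/* Illustrative only: take private ownership of an skb before setting
	 * a flag bit on it, so users of the shared copy never race with the
	 * write.
	 */
	static struct sk_buff *own_before_flagging(struct sk_buff *skb)
	{
		if (skb_shared(skb)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			if (!nskb)
				return NULL;
			consume_skb(skb);	/* drop our ref to the shared copy */
			skb = nskb;
		}
		skb->peeked = 1;		/* safe: we are the sole owner now */
		return skb;
	}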

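For item 17, the option is set from user space when joining a packet fanout
group.  A hedged example of how a consumer typically requests kernel-side
defragmentation (standard constants from <linux/if_packet.h>; the group id 42
and function name are arbitrary):

	#include <arpa/inet.h>
	#include <linux/if_ether.h>
	#include <linux/if_packet.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	/* Join fanout group 42 with hash load balancing and ask the kernel to
	 * reassemble fragmented IPv4 datagrams before handing them to us.
	 */
	static int open_defrag_fanout_socket(void)
	{
		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_IP));
		int fanout_arg = 42 | ((PACKET_FANOUT_HASH |
					PACKET_FANOUT_FLAG_DEFRAG) << 16);

		if (fd < 0)
			return -1;
		if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
			       &fanout_arg, sizeof(fanout_arg)) < 0) {
			perror("PACKET_FANOUT");
			close(fd);
			return -1;
		}
		return fd;
	}
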
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (84 commits)
  ravb: fix ring memory allocation
  net: phy: dp83867: Fix warning check for setting the internal delay
  openvswitch: allocate nr_node_ids flow_stats instead of num_possible_nodes
  netlink: don't hold mutex in rcu callback when releasing mmapd ring
  ARM: net: fix vlan access instructions in ARM JIT.
  ARM: net: handle negative offsets in BPF JIT.
  ARM: net: fix condition for load_order > 0 when translating load instructions.
  tcp: suppress a division by zero warning
  drivers: net: cpsw: remove tx event processing in rx napi poll
  inet: frags: fix defragmented packet's IP header for af_packet
  net: mvneta: fix refilling for Rx DMA buffers
  stmmac: fix setting of driver data in stmmac_dvr_probe
  sched: cls_flow: fix panic on filter replace
  sched: cls_flower: fix panic on filter replace
  sched: cls_bpf: fix panic on filter replace
  net/mdio: fix mdio_bus_match for c45 PHY
  net: ratelimit warnings about dst entry refcount underflow or overflow
  caif: fix leaks and race in caif_queue_rcv_skb()
  qmi_wwan: add the second QMI/network interface for Sierra Wireless MC7305/MC7355
  ravb: fix race updating TCCR
  ...
parents 5a5ca73a d8b48911
+44 −13
@@ -74,31 +74,51 @@ struct jit_ctx {
 
 int bpf_jit_enable __read_mostly;
 
-static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+		      unsigned int size)
+{
+	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+
+	if (!ptr)
+		return -EFAULT;
+	memcpy(ret, ptr, size);
+	return 0;
+}
+
+static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
 {
 	u8 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 1);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 1);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 1);
 
 	return (u64)err << 32 | ret;
 }
 
-static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
 {
 	u16 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 2);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 2);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 2);
 
 	return (u64)err << 32 | ntohs(ret);
 }
 
-static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
 {
 	u32 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 4);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 4);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 4);
 
 	return (u64)err << 32 | ntohl(ret);
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
 		case BPF_LD | BPF_B | BPF_ABS:
 			load_order = 0;
 load:
-			/* the interpreter will deal with the negative K */
-			if ((int)k < 0)
-				return -ENOTSUPP;
 			emit_mov_i(r_off, k, ctx);
 load_common:
 			ctx->seen |= SEEN_DATA | SEEN_CALL;
@@ -547,12 +564,24 @@ static int build_body(struct jit_ctx *ctx)
 				emit(ARM_SUB_I(r_scratch, r_skb_hl,
 					       1 << load_order), ctx);
 				emit(ARM_CMP_R(r_scratch, r_off), ctx);
-				condt = ARM_COND_HS;
+				condt = ARM_COND_GE;
 			} else {
 				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
 				condt = ARM_COND_HI;
 			}
 
+			/*
+			 * test for negative offset, only if we are
+			 * currently scheduled to take the fast
+			 * path. this will update the flags so that
+			 * the slowpath instruction are ignored if the
+			 * offset is negative.
+			 *
+			 * for loard_order == 0 the HI condition will
+			 * make loads at offset 0 take the slow path too.
+			 */
+			_emit(condt, ARM_CMP_I(r_off, 0), ctx);
+
 			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
 			      ctx);
@@ -860,9 +889,11 @@ static int build_body(struct jit_ctx *ctx)
 			off = offsetof(struct sk_buff, vlan_tci);
 			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
 			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
-				OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
-			else
-				OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
+				OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
+			else {
+				OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
+				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
+			}
 			break;
 		case BPF_ANC | SKF_AD_QUEUE:
 			ctx->seen |= SEEN_SKB;
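
As background for the ARM JIT hunks above (an illustration, not part of the
commit; the array name is arbitrary): classic BPF reaches the negative-offset
path through the special SKF_* constants, which are negative offsets.  A
filter such as the one below, attachable with the SO_ATTACH_FILTER socket
option, loads the IPv4 protocol byte relative to the network header and
therefore exercises the case that the JIT now handles at runtime via
call_neg_helper() instead of bailing out to the interpreter with -ENOTSUPP:

	#include <linux/filter.h>
	#include <netinet/in.h>

	/* "Accept TCP, drop everything else", using a network-header-relative
	 * (SKF_NET_OFF-based, i.e. negative) byte load.
	 */
	struct sock_filter only_tcp[] = {
		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),	/* protocol field */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, IPPROTO_TCP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),			/* accept */
		BPF_STMT(BPF_RET | BPF_K, 0),				/* drop */
	};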
+5 −6
@@ -472,12 +472,11 @@ int btbcm_setup_apple(struct hci_dev *hdev)
 
 	/* Read Verbose Config Version Info */
 	skb = btbcm_read_verbose_config(hdev);
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
-
-	BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
-		get_unaligned_le16(skb->data + 5));
-	kfree_skb(skb);
+	if (!IS_ERR(skb)) {
+		BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
+			get_unaligned_le16(skb->data + 5));
+		kfree_skb(skb);
+	}
 
 	set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
 
+10 −25
@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
 	cs->hw.ser->tty = tty;
 	atomic_set(&cs->hw.ser->refcnt, 1);
 	init_completion(&cs->hw.ser->dead_cmp);
-
 	tty->disc_data = cs;
 
+	/* Set the amount of data we're willing to receive per call
+	 * from the hardware driver to half of the input buffer size
+	 * to leave some reserve.
+	 * Note: We don't do flow control towards the hardware driver.
+	 * If more data is received than will fit into the input buffer,
+	 * it will be dropped and an error will be logged. This should
+	 * never happen as the device is slow and the buffer size ample.
+	 */
+	tty->receive_room = RBUFSIZE/2;
+
 	/* OK.. Initialization of the datastructures and the HW is done.. Now
 	 * startup system and notify the LL that we are ready to run
 	 */
@@ -597,28 +606,6 @@ static int gigaset_tty_hangup(struct tty_struct *tty)
 	return 0;
 }
 
-/*
- * Read on the tty.
- * Unused, received data goes only to the Gigaset driver.
- */
-static ssize_t
-gigaset_tty_read(struct tty_struct *tty, struct file *file,
-		 unsigned char __user *buf, size_t count)
-{
-	return -EAGAIN;
-}
-
-/*
- * Write on the tty.
- * Unused, transmit data comes only from the Gigaset driver.
- */
-static ssize_t
-gigaset_tty_write(struct tty_struct *tty, struct file *file,
-		  const unsigned char *buf, size_t count)
-{
-	return -EAGAIN;
-}
-
 /*
  * Ioctl on the tty.
  * Called in process context only.
@@ -752,8 +739,6 @@ static struct tty_ldisc_ops gigaset_ldisc = {
 	.open		= gigaset_tty_open,
 	.close		= gigaset_tty_close,
 	.hangup		= gigaset_tty_hangup,
-	.read		= gigaset_tty_read,
-	.write		= gigaset_tty_write,
 	.ioctl		= gigaset_tty_ioctl,
 	.receive_buf	= gigaset_tty_receive,
 	.write_wakeup	= gigaset_tty_wakeup,
+31 −3
@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
 	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
 }
 
+static struct slave *bond_get_old_active(struct bonding *bond,
+					 struct slave *new_active)
+{
+	struct slave *slave;
+	struct list_head *iter;
+
+	bond_for_each_slave(bond, slave, iter) {
+		if (slave == new_active)
+			continue;
+
+		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
+			return slave;
+	}
+
+	return NULL;
+}
+
 /* bond_do_fail_over_mac
  *
  * Perform special MAC address swapping for fail_over_mac settings
@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
 		if (!new_active)
 			return;
 
+		if (!old_active)
+			old_active = bond_get_old_active(bond, new_active);
+
 		if (old_active) {
 			ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
 			ether_addr_copy(saddr.sa_data,
@@ -1725,9 +1745,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
 err_undo_flags:
 	/* Enslave of first slave has failed and we need to fix master's mac */
-	if (!bond_has_slaves(bond) &&
-	    ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr))
-		eth_hw_addr_random(bond_dev);
+	if (!bond_has_slaves(bond)) {
+		if (ether_addr_equal_64bits(bond_dev->dev_addr,
+					    slave_dev->dev_addr))
+			eth_hw_addr_random(bond_dev);
+		if (bond_dev->type != ARPHRD_ETHER) {
+			ether_setup(bond_dev);
+			bond_dev->flags |= IFF_MASTER;
+			bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+		}
+	}
 
 	return res;
 }
@@ -1916,6 +1943,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
 		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
 		netdev_info(bond_dev, "Destroying bond %s\n",
 			    bond_dev->name);
+		bond_remove_proc_entry(bond);
 		unregister_netdevice(bond_dev);
 	}
 	return ret;
+4 −4
@@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev)
 
 	cf->can_id |= CAN_ERR_CRTL;
 	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
-	netif_receive_skb(skb);
 
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
 }
 
 /**
@@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
 	}
 
 	at91_read_mb(dev, mb, cf);
-	netif_receive_skb(skb);
 
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
 
 	can_led_event(dev, CAN_LED_EVENT_RX);
 }
@@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
 		return 0;
 
 	at91_poll_err_frame(dev, cf, reg_sr);
-	netif_receive_skb(skb);
 
 	dev->stats.rx_packets++;
 	dev->stats.rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
 
 	return 1;
 }
@@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev)
 		return;
 
 	at91_irq_err_state(dev, cf, new_state);
-	netif_rx(skb);
 
 	dev->stats.rx_packets++;
 	dev->stats.rx_bytes += cf->can_dlc;
+	netif_rx(skb);
 
 	priv->can.state = new_state;
 }