
Commit ea986679 authored by Linus Torvalds
Pull networking fixes from David Miller:

 1) Out of bounds access in xfrm IPSEC policy unlink, from Yue Haibing.

 2) Missing length check for esp4 UDP encap, from Sabrina Dubroca.

 3) Fix byte order of RX STBC access in mac80211, from Johannes Berg.

 4) Infinite loop in bpftool map create, from Alban Crequy.

 5) Register mark fix in ebpf verifier after pkt/null checks, from Paul
    Chaignon.

 6) Properly use rcu_dereference_sk_user_data in L2TP code, from Eric
    Dumazet.

 7) Buffer overrun in marvell phy driver, from Andrew Lunn.

 8) Several crash and statistics handling fixes to bnxt_en driver, from
    Michael Chan and Vasundhara Volam.

 9) Several fixes to the TLS layer from Jakub Kicinski (copying negative
    amounts of data in reencrypt, reencrypt frag copying, blind nskb->sk
    NULL deref, etc).

10) Several UDP GRO fixes, from Paolo Abeni and Eric Dumazet.

11) PID/UID checks on ipv6 flow labels are inverted, from Willem de
    Bruijn.

12) Use after free in l2tp, from Eric Dumazet.

13) IPV6 route destroy races, also from Eric Dumazet.

14) SCTP state machine can erroneously run recursively, fix from Xin
    Long.

15) Adjust AF_PACKET msg_name length checks, add padding bytes if
    necessary. From Willem de Bruijn.

16) Preserve skb_iif, so that forwarded packets have consistent values
    even if fragmentation is involved. From Shmulik Ladkani.
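
Item 16 boils down to copying one field when fragments are created: the
ingress interface index must follow the packet onto each fragment. As a
minimal sketch of the idea (not the actual patch; the helper name below
is invented for illustration):

/* Hedged sketch: propagate the ingress ifindex (skb->skb_iif) from the
 * original skb to a newly created fragment so later consumers see a
 * consistent value. inherit_frag_iif() is an illustrative name, not a
 * kernel function.
 */
#include <linux/skbuff.h>

static void inherit_frag_iif(struct sk_buff *to, const struct sk_buff *from)
{
	to->skb_iif = from->skb_iif;	/* keep the receive device index */
}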

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (69 commits)
  udp: fix GRO packet of death
  ipv6: A few fixes on dereferencing rt->from
  rds: ib: force endiannes annotation
  selftests: fib_rule_tests: print the result and return 1 if any tests failed
  ipv4: ip_do_fragment: Preserve skb_iif during fragmentation
  net/tls: avoid NULL pointer deref on nskb->sk in fallback
  selftests: fib_rule_tests: Fix icmp proto with ipv6
  packet: validate msg_namelen in send directly
  packet: in recvmsg msg_name return at least sizeof sockaddr_ll
  sctp: avoid running the sctp state machine recursively
  stmmac: pci: Fix typo in IOT2000 comment
  Documentation: fix netdev-FAQ.rst markup warning
  ipv6: fix races in ip6_dst_destroy()
  l2ip: fix possible use-after-free
  appletalk: Set error code if register_snap_client failed
  net: dsa: bcm_sf2: fix buffer overflow doing set_rxnfc
  rxrpc: Fix net namespace cleanup
  ipv6/flowlabel: wait rcu grace period before put_pid()
  vrf: Use orig netdev to count Ip6InNoRoutes and a fresh route lookup when sending dest unreach
  tcp: add sanity tests in tcp_add_backlog()
  ...
parents 5ce3307b 4dd2b82d
Documentation/networking/ip-sysctl.txt (+2 −0)
@@ -1337,6 +1337,7 @@ tag - INTEGER
 	Default value is 0.
 
 xfrm4_gc_thresh - INTEGER
+	(Obsolete since linux-4.14)
 	The threshold at which we will start garbage collecting for IPv4
 	destination cache entries.  At twice this value the system will
 	refuse new allocations.
@@ -1920,6 +1921,7 @@ echo_ignore_all - BOOLEAN
 	Default: 0
 
 xfrm6_gc_thresh - INTEGER
+	(Obsolete since linux-4.14)
 	The threshold at which we will start garbage collecting for IPv6
 	destination cache entries.  At twice this value the system will
 	refuse new allocations.
Documentation/networking/netdev-FAQ.rst (+1 −1)
@@ -132,7 +132,7 @@ version that should be applied. If there is any doubt, the maintainer
 will reply and ask what should be done.
 
 Q: I made changes to only a few patches in a patch series should I resend only those changed?
---------------------------------------------------------------------------------------------
+----------------------------------------------------------------------------------------------
 A: No, please resend the entire patch series and make sure you do number your
 patches such that it is clear this is the latest and greatest set of patches
 that can be applied.
arch/mips/net/ebpf_jit.c (+3 −2)
@@ -186,7 +186,8 @@ enum which_ebpf_reg {
  * separate frame pointer, so BPF_REG_10 relative accesses are
  * adjusted to be $sp relative.
  */
-int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
-		     enum which_ebpf_reg w)
+static int ebpf_to_mips_reg(struct jit_ctx *ctx,
+			    const struct bpf_insn *insn,
+			    enum which_ebpf_reg w)
 {
 	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
drivers/net/dsa/bcm_sf2_cfp.c (+6 −0)
@@ -886,6 +886,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
 	     fs->m_ext.data[1]))
 		return -EINVAL;
 
+	if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
+		return -EINVAL;
+
 	if (fs->location != RX_CLS_LOC_ANY &&
 	    test_bit(fs->location, priv->cfp.used))
 		return -EBUSY;
@@ -974,6 +977,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
 	struct cfp_rule *rule;
 	int ret;
 
+	if (loc >= CFP_NUM_RULES)
+		return -EINVAL;
+
 	/* Refuse deleting unused rules, and those that are not unique since
 	 * that could leave IPv6 rules with one of the chained rule in the
 	 * table.
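
Both added checks follow the same defensive pattern: fs->location and loc
arrive from userspace via ethtool, and test_bit()/set_bit() beyond
CFP_NUM_RULES would touch memory past the priv->cfp.used bitmap. A minimal
sketch of the pattern; MY_NUM_RULES, used_rules and claim_rule_slot() are
invented stand-ins, not driver code:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define MY_NUM_RULES	256
static DECLARE_BITMAP(used_rules, MY_NUM_RULES);

static int claim_rule_slot(unsigned int loc)
{
	if (loc >= MY_NUM_RULES)		/* bound-check before any bitmap access */
		return -EINVAL;
	if (test_and_set_bit(loc, used_rules))
		return -EBUSY;			/* slot already claimed */
	return 0;
}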
drivers/net/ethernet/broadcom/bnxt/bnxt.c (+32 −21)
@@ -1625,7 +1625,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 			netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
 			bnxt_sched_reset(bp, rxr);
 		}
-		goto next_rx;
+		goto next_rx_no_len;
 	}
 
 	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
@@ -1706,12 +1706,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	rc = 1;
 
 next_rx:
-	rxr->rx_prod = NEXT_RX(prod);
-	rxr->rx_next_cons = NEXT_RX(cons);
-
 	cpr->rx_packets += 1;
 	cpr->rx_bytes += len;
 
+next_rx_no_len:
+	rxr->rx_prod = NEXT_RX(prod);
+	rxr->rx_next_cons = NEXT_RX(cons);
+
 next_rx_no_prod_no_len:
 	*raw_cons = tmp_raw_cons;
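
The point of the new label: in bnxt_rx_pkt() the length is parsed only
after the error branch, so the old "goto next_rx" charged an unparsed len
to cpr->rx_bytes. The split keeps the ring-index updates on the error
path but skips the byte/packet accounting. A toy sketch of that control
flow (illustrative, not driver code):

/* Hedged sketch: on an RX error the length field was never parsed, so
 * skip the accounting but still advance the ring state.
 */
static void consume_rx(unsigned long *pkts, unsigned long *bytes,
		       unsigned int *cons, int rx_err, unsigned int len)
{
	if (rx_err)
		goto next_rx_no_len;	/* len is not valid on error */
	(*pkts)++;
	(*bytes) += len;
next_rx_no_len:
	(*cons)++;			/* ring index always moves forward */
}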

@@ -5135,10 +5136,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 	for (i = 0; i < bp->tx_nr_rings; i++) {
 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
-		u32 cmpl_ring_id;
 
-		cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+			u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
+
 			hwrm_ring_free_send_msg(bp, ring,
 						RING_FREE_REQ_RING_TYPE_TX,
 						close_path ? cmpl_ring_id :
@@ -5151,10 +5152,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
 		u32 grp_idx = rxr->bnapi->index;
-		u32 cmpl_ring_id;
 
-		cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+
 			hwrm_ring_free_send_msg(bp, ring,
 						RING_FREE_REQ_RING_TYPE_RX,
 						close_path ? cmpl_ring_id :
@@ -5173,10 +5174,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
 		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
 		u32 grp_idx = rxr->bnapi->index;
-		u32 cmpl_ring_id;
 
-		cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+
 			hwrm_ring_free_send_msg(bp, ring, type,
 						close_path ? cmpl_ring_id :
 						INVALID_HW_RING_ID);
@@ -5315,17 +5316,16 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
 	req->num_tx_rings = cpu_to_le16(tx_rings);
 	if (BNXT_NEW_RM(bp)) {
 		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+		enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
 			enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
 			enables |= tx_rings + ring_grps ?
-				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
 			enables |= rx_rings ?
 				FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
 		} else {
 			enables |= cp_rings ?
-				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
 			enables |= ring_grps ?
 				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
 				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
@@ -5365,14 +5365,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
 	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
 	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
 			      FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+	enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
 		enables |= tx_rings + ring_grps ?
-			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-			   FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
 	} else {
 		enables |= cp_rings ?
-			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-			   FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
 		enables |= ring_grps ?
 			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
 	}
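
Both hunks make the same correction: the NUM_STAT_CTXS enable bit is now
requested whenever statistics contexts are needed, instead of riding
along on the completion-ring condition. A hedged sketch of the resulting
mask-building shape; the EN_* flags and build_enables() are invented,
not the bnxt HWRM interface:

enum {
	EN_TX_RINGS	= 1 << 0,
	EN_RX_RINGS	= 1 << 1,
	EN_CMPL_RINGS	= 1 << 2,
	EN_STAT_CTXS	= 1 << 3,
};

static unsigned int build_enables(int tx, int rx, int cp, int stats)
{
	unsigned int enables = 0;

	enables |= tx ? EN_TX_RINGS : 0;
	enables |= rx ? EN_RX_RINGS : 0;
	enables |= cp ? EN_CMPL_RINGS : 0;
	enables |= stats ? EN_STAT_CTXS : 0;	/* gated on stats alone now */
	return enables;
}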
@@ -6753,6 +6752,7 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
 	struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
 	struct hwrm_port_qstats_ext_input req = {0};
 	struct bnxt_pf_info *pf = &bp->pf;
+	u32 tx_stat_size;
 	int rc;
 
 	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
@@ -6762,13 +6762,16 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
 	req.port_id = cpu_to_le16(pf->port_id);
 	req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
 	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
-	req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
+	tx_stat_size = bp->hw_tx_port_stats_ext ?
+		       sizeof(*bp->hw_tx_port_stats_ext) : 0;
+	req.tx_stat_size = cpu_to_le16(tx_stat_size);
 	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc) {
 		bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
-		bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
+		bp->fw_tx_stats_ext_size = tx_stat_size ?
+			le16_to_cpu(resp->tx_stat_size) / 8 : 0;
 	} else {
 		bp->fw_rx_stats_ext_size = 0;
 		bp->fw_tx_stats_ext_size = 0;
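
The fix above advertises a zero TX stats size when hw_tx_port_stats_ext
was never allocated, so the firmware is not handed a size for a buffer
that does not exist, and the driver ignores the response size in that
case. A tiny sketch of the guard, with invented names:

/* Hedged sketch: only advertise a DMA buffer size when the buffer
 * actually exists. struct tx_stats and the helper are illustrative.
 */
struct tx_stats;	/* opaque hardware stats layout */

static unsigned short tx_stats_req_size(const struct tx_stats *buf,
					unsigned short full_size)
{
	return buf ? full_size : 0;	/* 0 tells firmware: no TX buffer */
}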
@@ -8961,8 +8964,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
 
 skip_uc:
 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+	if (rc && vnic->mc_list_count) {
+		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+			    rc);
+		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+		vnic->mc_list_count = 0;
+		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+	}
 	if (rc)
-		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
 			   rc);
 
 	return rc;
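
The rx-mode hunk adds a fallback-and-retry: if programming the multicast
filter list fails, switch the VNIC to all-multicast and try once more
before reporting failure. A hedged sketch of that shape; struct nic,
hw_set_rx_mask() and RX_MASK_ALL_MCAST are invented stand-ins for the
bnxt vnic/HWRM names:

#define RX_MASK_ALL_MCAST	0x4

struct nic {
	unsigned int rx_mask;
	unsigned int mc_count;
};

int hw_set_rx_mask(struct nic *n);	/* hardware-programming hook, assumed elsewhere */

static int apply_rx_mode(struct nic *n)
{
	int rc = hw_set_rx_mask(n);

	if (rc && n->mc_count) {
		/* per-address MC filters failed: fall back to all-multicast */
		n->rx_mask |= RX_MASK_ALL_MCAST;
		n->mc_count = 0;
		rc = hw_set_rx_mask(n);	/* one retry with the coarser mask */
	}
	return rc;
}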
@@ -10685,6 +10695,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	bnxt_clear_int_mode(bp);
 
 init_err_pci_clean:
+	bnxt_free_hwrm_short_cmd_req(bp);
 	bnxt_free_hwrm_resources(bp);
 	bnxt_free_ctx_mem(bp);
 	kfree(bp->ctx);