
Commit f7a9286e authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "Merge android-4.4.174 (62872f95) into msm-4.4"

parents 785e337e 95ddd086
Documentation/networking/ip-sysctl.txt  +5 −8
@@ -112,14 +112,11 @@ min_adv_mss - INTEGER

 IP Fragmentation:
 
-ipfrag_high_thresh - INTEGER
-	Maximum memory used to reassemble IP fragments. When
-	ipfrag_high_thresh bytes of memory is allocated for this purpose,
-	the fragment handler will toss packets until ipfrag_low_thresh
-	is reached. This also serves as a maximum limit to namespaces
-	different from the initial one.
+ipfrag_high_thresh - LONG INTEGER
+	Maximum memory used to reassemble IP fragments.
 
-ipfrag_low_thresh - INTEGER
+ipfrag_low_thresh - LONG INTEGER
+	(Obsolete since linux-4.17)
 	Maximum memory used to reassemble IP fragments before the kernel
 	begins to remove incomplete fragment queues to free up resources.
 	The kernel still accepts new fragments for defragmentation.
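
Editorial aside (not part of the patch): both thresholds remain readable and writable through the usual sysctl interface. A minimal userspace sketch, assuming only the standard /proc/sys/net/ipv4 paths named above:

	#include <stdio.h>

	/* Read one long-valued sysctl from procfs; -1 on any failure. */
	static long read_long_sysctl(const char *path)
	{
		long val = -1;
		FILE *f = fopen(path, "r");

		if (f) {
			if (fscanf(f, "%ld", &val) != 1)
				val = -1;
			fclose(f);
		}
		return val;
	}

	int main(void)
	{
		printf("ipfrag_high_thresh = %ld\n",
		       read_long_sysctl("/proc/sys/net/ipv4/ipfrag_high_thresh"));
		printf("ipfrag_low_thresh  = %ld\n",
		       read_long_sysctl("/proc/sys/net/ipv4/ipfrag_low_thresh"));
		return 0;
	}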
Makefile  +1 −1
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 173
+SUBLEVEL = 174
 EXTRAVERSION =
 NAME = Blurry Fish Butt

include/linux/rhashtable.h  +111 −32
@@ -133,23 +133,23 @@ struct rhashtable_params {
 /**
  * struct rhashtable - Hash table handle
  * @tbl: Bucket table
- * @nelems: Number of elements in table
  * @key_len: Key length for hashfn
  * @elasticity: Maximum chain length before rehash
  * @p: Configuration parameters
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
  * @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
  */
 struct rhashtable {
 	struct bucket_table __rcu	*tbl;
-	atomic_t			nelems;
 	unsigned int			key_len;
 	unsigned int			elasticity;
 	struct rhashtable_params	p;
 	struct work_struct		run_work;
 	struct mutex                    mutex;
 	spinlock_t			lock;
+	atomic_t			nelems;
 };
 
 /**
@@ -343,7 +343,8 @@ int rhashtable_init(struct rhashtable *ht,
 struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
 					    const void *key,
 					    struct rhash_head *obj,
-					    struct bucket_table *old_tbl);
+					    struct bucket_table *old_tbl,
+					    void **data);
 int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
 
 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
@@ -514,18 +515,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
 	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
 }
 
-/**
- * rhashtable_lookup_fast - search hash table, inlined version
- * @ht:		hash table
- * @key:	the pointer to the key
- * @params:	hash table parameters
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for an entry with an identical key. The first matching entry is returned.
- *
- * Returns the first entry on which the compare function returned true.
- */
-static inline void *rhashtable_lookup_fast(
+/* Internal function, do not use. */
+static inline struct rhash_head *__rhashtable_lookup(
 	struct rhashtable *ht, const void *key,
 	const struct rhashtable_params params)
 {
@@ -537,8 +528,6 @@ static inline void *rhashtable_lookup_fast(
 	struct rhash_head *he;
 	unsigned int hash;
 
-	rcu_read_lock();
-
 	tbl = rht_dereference_rcu(ht->tbl, ht);
 restart:
 	hash = rht_key_hashfn(ht, tbl, key, params);
@@ -547,8 +536,7 @@ restart:
 		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
 		    rhashtable_compare(&arg, rht_obj(ht, he)))
 			continue;
-		rcu_read_unlock();
-		return rht_obj(ht, he);
+		return he;
 	}
 
 	/* Ensure we see any new tables. */
@@ -557,13 +545,64 @@ restart:
 	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 	if (unlikely(tbl))
 		goto restart;
-	rcu_read_unlock();
 
 	return NULL;
 }
 
-/* Internal function, please use rhashtable_insert_fast() instead */
-static inline int __rhashtable_insert_fast(
+/**
+ * rhashtable_lookup - search hash table
+ * @ht:		hash table
+ * @key:	the pointer to the key
+ * @params:	hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup(
+	struct rhashtable *ht, const void *key,
+	const struct rhashtable_params params)
+{
+	struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+
+	return he ? rht_obj(ht, he) : NULL;
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, without RCU read lock
+ * @ht:		hash table
+ * @key:	the pointer to the key
+ * @params:	hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * Only use this function when you have other mechanisms guaranteeing
+ * that the object won't go away after the RCU read lock is released.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup_fast(
+	struct rhashtable *ht, const void *key,
+	const struct rhashtable_params params)
+{
+	void *obj;
+
+	rcu_read_lock();
+	obj = rhashtable_lookup(ht, key, params);
+	rcu_read_unlock();
+
+	return obj;
+}
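
/* Editorial sketch (not part of the patch): how the two lookup variants
 * above divide responsibility. "struct my_obj" and "my_params" are
 * hypothetical. rhashtable_lookup() must run under the RCU read lock and
 * its result is only stable inside that critical section, while
 * rhashtable_lookup_fast() takes the lock itself, so the caller needs
 * some other guarantee that the object outlives the lookup.
 */
struct my_obj {
	u32			key;
	struct rhash_head	node;
};

static const struct rhashtable_params my_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct my_obj, key),
	.head_offset	= offsetof(struct my_obj, node),
};

static bool my_key_present(struct rhashtable *ht, u32 key)
{
	bool found;

	rcu_read_lock();
	found = rhashtable_lookup(ht, &key, my_params) != NULL;
	rcu_read_unlock();	/* the entry may be freed from here on */

	return found;
}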

+/* Internal function, please use rhashtable_insert_fast() instead. This
+ * function returns the existing element already in hashes if there is a clash,
+ * otherwise it returns an error via ERR_PTR().
+ */
+static inline void *__rhashtable_insert_fast(
 	struct rhashtable *ht, const void *key, struct rhash_head *obj,
 	const struct rhashtable_params params)
 {
@@ -576,6 +615,7 @@ static inline int __rhashtable_insert_fast(
 	spinlock_t *lock;
 	unsigned int elasticity;
 	unsigned int hash;
+	void *data = NULL;
 	int err;
 
 restart:
@@ -600,11 +640,14 @@ restart:

 	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 	if (unlikely(new_tbl)) {
-		tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
+		tbl = rhashtable_insert_slow(ht, key, obj, new_tbl, &data);
 		if (!IS_ERR_OR_NULL(tbl))
 			goto slow_path;
 
 		err = PTR_ERR(tbl);
+		if (err == -EEXIST)
+			err = 0;
+
 		goto out;
 	}

@@ -618,25 +661,25 @@ slow_path:
 		err = rhashtable_insert_rehash(ht, tbl);
 		rcu_read_unlock();
 		if (err)
-			return err;
+			return ERR_PTR(err);
 
 		goto restart;
 	}
 
-	err = -EEXIST;
+	err = 0;
 	elasticity = ht->elasticity;
 	rht_for_each(head, tbl, hash) {
 		if (key &&
 		    unlikely(!(params.obj_cmpfn ?
 			       params.obj_cmpfn(&arg, rht_obj(ht, head)) :
-			       rhashtable_compare(&arg, rht_obj(ht, head)))))
+			       rhashtable_compare(&arg, rht_obj(ht, head))))) {
+			data = rht_obj(ht, head);
 			goto out;
+		}
 		if (!--elasticity)
 			goto slow_path;
 	}
 
-	err = 0;
-
 	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
 
 	RCU_INIT_POINTER(obj->next, head);
@@ -651,7 +694,7 @@ out:
 	spin_unlock_bh(lock);
 	rcu_read_unlock();
 
-	return err;
+	return err ? ERR_PTR(err) : data;
 }

/**
@@ -674,7 +717,13 @@ static inline int rhashtable_insert_fast(
 	struct rhashtable *ht, struct rhash_head *obj,
 	const struct rhashtable_params params)
 {
-	return __rhashtable_insert_fast(ht, NULL, obj, params);
+	void *ret;
+
+	ret = __rhashtable_insert_fast(ht, NULL, obj, params);
+	if (IS_ERR(ret))
+		return PTR_ERR(ret);
+
+	return ret == NULL ? 0 : -EEXIST;
 }
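
/* Editorial note (not part of the patch): __rhashtable_insert_fast() now
 * returns a pointer, and the int wrappers translate its tri-state result
 * back to the old error-code convention, so a hypothetical caller keeps
 * its existing shape:
 *
 *	err = rhashtable_insert_fast(ht, &obj->node, my_params);
 *	if (err == -EEXIST)
 *		...	duplicate key, object not inserted
 *	else if (err)
 *		...	allocation or resize failure
 */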

/**
@@ -703,11 +752,15 @@ static inline int rhashtable_lookup_insert_fast(
 	const struct rhashtable_params params)
 {
 	const char *key = rht_obj(ht, obj);
+	void *ret;
 
 	BUG_ON(ht->p.obj_hashfn);
 
-	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
-					params);
+	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params);
+	if (IS_ERR(ret))
+		return PTR_ERR(ret);
+
+	return ret == NULL ? 0 : -EEXIST;
 }

/**
@@ -735,6 +788,32 @@ static inline int rhashtable_lookup_insert_fast(
 static inline int rhashtable_lookup_insert_key(
 	struct rhashtable *ht, const void *key, struct rhash_head *obj,
 	const struct rhashtable_params params)
 {
+	void *ret;
+
 	BUG_ON(!ht->p.obj_hashfn || !key);
 
-	return __rhashtable_insert_fast(ht, key, obj, params);
+	ret = __rhashtable_insert_fast(ht, key, obj, params);
+	if (IS_ERR(ret))
+		return PTR_ERR(ret);
+
+	return ret == NULL ? 0 : -EEXIST;
 }
 
+/**
+ * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
+ * @ht:		hash table
+ * @obj:	pointer to hash head inside object
+ * @params:	hash table parameters
+ * @data:	pointer to element data already in hashes
+ *
+ * Just like rhashtable_lookup_insert_key(), but this function returns the
+ * object if it exists, NULL if it does not and the insertion was successful,
+ * and an ERR_PTR otherwise.
+ */
+static inline void *rhashtable_lookup_get_insert_key(
+	struct rhashtable *ht, const void *key, struct rhash_head *obj,
+	const struct rhashtable_params params)
+{
+	BUG_ON(!ht->p.obj_hashfn || !key);

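Editorial aside (not part of the patch): a sketch of how a caller might consume the tri-state return of rhashtable_lookup_get_insert_key(). struct my_obj and my_params are the hypothetical types from the sketch earlier in this file; the params would also need an obj_hashfn, as the BUG_ON above enforces:

	static struct my_obj *my_get_or_insert(struct rhashtable *ht,
					       struct my_obj *new)
	{
		void *old;

		old = rhashtable_lookup_get_insert_key(ht, &new->key,
						       &new->node, my_params);
		if (IS_ERR(old))
			return old;	/* insertion failed, e.g. ERR_PTR(-ENOMEM) */
		if (old)
			return old;	/* existing element wins; caller frees "new" */
		return new;		/* "new" was linked into the table */
	}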
include/linux/skbuff.h  +10 −6
@@ -556,9 +556,14 @@ struct sk_buff {
 				struct skb_mstamp skb_mstamp;
 			};
 		};
-		struct rb_node	rbnode; /* used in netem & tcp stack */
+		struct rb_node		rbnode; /* used in netem, ip4 defrag, and tcp stack */
 	};
-	struct sock		*sk;
+
+	union {
+		struct sock		*sk;
+		int			ip_defrag_offset;
+	};
+
 	struct net_device	*dev;

	/*
@@ -2273,7 +2278,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 		kfree_skb(skb);
 }
 
-void skb_rbtree_purge(struct rb_root *root);
+unsigned int skb_rbtree_purge(struct rb_root *root);
 
 void *netdev_alloc_frag(unsigned int fragsz);

@@ -2791,6 +2796,7 @@ static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
 	return skb->data;
 }
 
+int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
 /**
  *	pskb_trim_rcsum - trim received skb and update checksum
  *	@skb: buffer to trim
@@ -2805,9 +2811,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
 {
 	if (likely(len >= skb->len))
 		return 0;
-	if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->ip_summed = CHECKSUM_NONE;
-	return __pskb_trim(skb, len);
+	return pskb_trim_rcsum_slow(skb, len);
 }
 
 #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
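
/* Editorial sketch (not part of the patch): a typical receive-path caller
 * is unchanged; shrinking trims simply go through the new out-of-line
 * pskb_trim_rcsum_slow(), which now owns the CHECKSUM_COMPLETE fixup.
 * Hypothetical rx path:
 *
 *	if (pskb_trim_rcsum(skb, payload_len)) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 */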
include/net/inet_frag.h  +62 −71
 #ifndef __NET_FRAG_H__
 #define __NET_FRAG_H__
 
+#include <linux/rhashtable.h>
+
 struct netns_frags {
-	/* Keep atomic mem on separate cachelines in structs that include it */
-	atomic_t		mem ____cacheline_aligned_in_smp;
 	/* sysctls */
+	long			high_thresh;
+	long			low_thresh;
 	int			timeout;
-	int			high_thresh;
-	int			low_thresh;
+	struct inet_frags	*f;
+
+	struct rhashtable       rhashtable ____cacheline_aligned_in_smp;
+
+	/* Keep atomic mem on separate cachelines in structs that include it */
+	atomic_long_t		mem ____cacheline_aligned_in_smp;
 };

/**
@@ -23,74 +29,68 @@ enum {
 	INET_FRAG_COMPLETE	= BIT(2),
 };
 
+struct frag_v4_compare_key {
+	__be32		saddr;
+	__be32		daddr;
+	u32		user;
+	u32		vif;
+	__be16		id;
+	u16		protocol;
+};
+
+struct frag_v6_compare_key {
+	struct in6_addr	saddr;
+	struct in6_addr	daddr;
+	u32		user;
+	__be32		id;
+	u32		iif;
+};
+
 /**
  * struct inet_frag_queue - fragment queue
  *
- * @lock: spinlock protecting the queue
+ * @node: rhash node
+ * @key: keys identifying this frag.
  * @timer: queue expiration timer
- * @list: hash bucket list
+ * @lock: spinlock protecting this frag
  * @refcnt: reference count of the queue
  * @fragments: received fragments head
+ * @rb_fragments: received fragments rb-tree root
  * @fragments_tail: received fragments tail
+ * @last_run_head: the head of the last "run". see ip_fragment.c
  * @stamp: timestamp of the last received fragment
  * @len: total length of the original datagram
  * @meat: length of received fragments so far
  * @flags: fragment queue flags
  * @max_size: maximum received fragment size
  * @net: namespace that this frag belongs to
- * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
+ * @rcu: rcu head for freeing deferral
  */
 struct inet_frag_queue {
-	spinlock_t		lock;
+	struct rhash_head	node;
+	union {
+		struct frag_v4_compare_key v4;
+		struct frag_v6_compare_key v6;
+	} key;
 	struct timer_list	timer;
-	struct hlist_node	list;
+	spinlock_t		lock;
 	atomic_t		refcnt;
-	struct sk_buff		*fragments;
+	struct sk_buff		*fragments;  /* Used in IPv6. */
+	struct rb_root		rb_fragments; /* Used in IPv4. */
 	struct sk_buff		*fragments_tail;
+	struct sk_buff		*last_run_head;
 	ktime_t			stamp;
 	int			len;
 	int			meat;
 	__u8			flags;
 	u16			max_size;
 	struct netns_frags      *net;
-	struct hlist_node	list_evictor;
+	struct rcu_head		rcu;
 };
 
-#define INETFRAGS_HASHSZ	1024
-
-/* averaged:
- * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
- *	       rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
- *	       struct frag_queue))
- */
-#define INETFRAGS_MAXDEPTH	128
-
-struct inet_frag_bucket {
-	struct hlist_head	chain;
-	spinlock_t		chain_lock;
-	struct rcu_head		rcu;
-};
-
 struct inet_frags {
-	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];
-
-	struct work_struct	frags_work;
-	unsigned int next_bucket;
-	unsigned long last_rebuild_jiffies;
-	bool rebuild;
-
-	/* The first call to hashfn is responsible to initialize
-	 * rnd. This is best done with net_get_random_once.
-	 *
-	 * rnd_seqlock is used to let hash insertion detect
-	 * when it needs to re-lookup the hash chain to use.
-	 */
-	u32			rnd;
-	seqlock_t		rnd_seqlock;
 	int			qsize;
 
-	unsigned int		(*hashfn)(const struct inet_frag_queue *);
-	bool			(*match)(const struct inet_frag_queue *q,
-					 const void *arg);
 	void			(*constructor)(struct inet_frag_queue *q,
 					       const void *arg);
 	void			(*destructor)(struct inet_frag_queue *);
@@ -98,56 +98,47 @@ struct inet_frags {
 	void			(*frag_expire)(unsigned long data);
 	struct kmem_cache	*frags_cachep;
 	const char		*frags_cache_name;
+	struct rhashtable_params rhash_params;
 };
 
 int inet_frags_init(struct inet_frags *);
 void inet_frags_fini(struct inet_frags *);
 
-static inline void inet_frags_init_net(struct netns_frags *nf)
+static inline int inet_frags_init_net(struct netns_frags *nf)
 {
-	atomic_set(&nf->mem, 0);
+	atomic_long_set(&nf->mem, 0);
+	return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params);
 }
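
/* Editorial note (not part of the patch): per-netns init can now fail,
 * since it allocates the rhashtable, so pernet init paths have to set
 * nf->f first and propagate the error, along the lines of (hypothetical
 * function name; ip4_frags is the IPv4 global):
 *
 *	static int __net_init my_frags_init_net(struct net *net)
 *	{
 *		net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
 *		net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
 *		net->ipv4.frags.f = &ip4_frags;
 *		return inet_frags_init_net(&net->ipv4.frags);
 *	}
 */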
-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
+void inet_frags_exit_net(struct netns_frags *nf);
 
-void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
-		struct inet_frags *f, void *key, unsigned int hash);
+void inet_frag_kill(struct inet_frag_queue *q);
+void inet_frag_destroy(struct inet_frag_queue *q);
+struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
 
-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
-				   const char *prefix);
+/* Free all skbs in the queue; return the sum of their truesizes. */
+unsigned int inet_frag_rbtree_purge(struct rb_root *root);
 
-static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
+static inline void inet_frag_put(struct inet_frag_queue *q)
 {
 	if (atomic_dec_and_test(&q->refcnt))
-		inet_frag_destroy(q, f);
-}
-
-static inline bool inet_frag_evicting(struct inet_frag_queue *q)
-{
-	return !hlist_unhashed(&q->list_evictor);
+		inet_frag_destroy(q);
 }

 /* Memory Tracking Functions. */
 
-static inline int frag_mem_limit(struct netns_frags *nf)
-{
-	return atomic_read(&nf->mem);
-}
-
-static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
+static inline long frag_mem_limit(const struct netns_frags *nf)
 {
-	atomic_sub(i, &nf->mem);
+	return atomic_long_read(&nf->mem);
 }
 
-static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
+static inline void sub_frag_mem_limit(struct netns_frags *nf, long val)
 {
-	atomic_add(i, &nf->mem);
+	atomic_long_sub(val, &nf->mem);
 }
 
-static inline int sum_frag_mem_limit(struct netns_frags *nf)
+static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
 {
-	return atomic_read(&nf->mem);
+	atomic_long_add(val, &nf->mem);
 }
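
/* Editorial note (not part of the patch): callers charge and release
 * fragment memory exactly as before, only against a wider counter, e.g.:
 *
 *	add_frag_mem_limit(q->net, skb->truesize);
 *	...
 *	sub_frag_mem_limit(q->net, skb->truesize);
 *
 * frag_mem_limit() now returns long, matching the LONG INTEGER sysctls
 * documented in ip-sysctl.txt above.
 */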

/* RFC 3168 support :