
Commit 0f8abf5d authored by Florian Westphal, committed by android-build-merger

netfilter: conntrack: refine gc worker heuristics, redux

am: 371d0342

Change-Id: I4c4c2c9e5ff4d36b76f7a28332ac6f47014d5b6f
parents 615124cf 371d0342
Showing 1 changed file with 20 additions and 19 deletions
@@ -85,9 +85,11 @@ static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;

 /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
-#define GC_MAX_BUCKETS_DIV	64u
-/* upper bound of scan intervals */
-#define GC_INTERVAL_MAX		(2 * HZ)
+#define GC_MAX_BUCKETS_DIV	128u
+/* upper bound of full table scan */
+#define GC_MAX_SCAN_JIFFIES	(16u * HZ)
+/* desired ratio of entries found to be expired */
+#define GC_EVICT_RATIO	50u

 static struct conntrack_gc_work conntrack_gc_work;
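
The replaced constants trade the old fixed 2-second interval cap for a bound on the worst-case full sweep: the table is scanned in GC_MAX_BUCKETS_DIV slices, and the per-cycle delay is capped at GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV, so a complete pass never takes longer than 16 seconds. A minimal userspace sketch of that arithmetic (HZ=1000 is an assumption, kernels also ship 100/250/300, and max_u() stands in for the kernel's max() macro):

/* Userspace sketch of the interval arithmetic above; not kernel code. */
#include <stdio.h>

#define HZ			1000u	/* assumed tick rate */
#define GC_MAX_BUCKETS_DIV	128u
#define GC_MAX_SCAN_JIFFIES	(16u * HZ)

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* smallest delay between two gc cycles */
	unsigned int min_interval = max_u(HZ / GC_MAX_BUCKETS_DIV, 1u);
	/* largest per-cycle delay, chosen so that visiting all
	 * GC_MAX_BUCKETS_DIV slices never exceeds GC_MAX_SCAN_JIFFIES */
	unsigned int max_interval = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;

	printf("min interval: %u jiffies\n", min_interval);	/* 7 */
	printf("max interval: %u jiffies\n", max_interval);	/* 125 */
	printf("slowest full scan: %u jiffies (%u s)\n",
	       max_interval * GC_MAX_BUCKETS_DIV,
	       max_interval * GC_MAX_BUCKETS_DIV / HZ);		/* 16000, 16 */
	return 0;
}

With HZ=1000 this prints a 7-jiffy floor and a 125-jiffy ceiling, i.e. 128 * 125 = 16000 jiffies = 16 s for the slowest complete pass.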

@@ -936,6 +938,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash)

 static void gc_worker(struct work_struct *work)
 {
+	unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
 	unsigned int i, goal, buckets = 0, expired_count = 0;
 	struct conntrack_gc_work *gc_work;
 	unsigned int ratio, scanned = 0;
@@ -994,27 +997,25 @@ static void gc_worker(struct work_struct *work)
 	 * 1. Minimize time until we notice a stale entry
 	 * 2. Maximize scan intervals to not waste cycles
 	 *
-	 * Normally, expired_count will be 0, this increases the next_run time
-	 * to priorize 2) above.
+	 * Normally, expire ratio will be close to 0.
 	 *
-	 * As soon as a timed-out entry is found, move towards 1) and increase
-	 * the scan frequency.
-	 * In case we have lots of evictions next scan is done immediately.
+	 * As soon as a sizeable fraction of the entries have expired
+	 * increase scan frequency.
 	 */
 	ratio = scanned ? expired_count * 100 / scanned : 0;
-	if (ratio >= 90) {
-		gc_work->next_gc_run = 0;
-		next_run = 0;
-	} else if (expired_count) {
-		gc_work->next_gc_run /= 2U;
-		next_run = msecs_to_jiffies(1);
+	if (ratio > GC_EVICT_RATIO) {
+		gc_work->next_gc_run = min_interval;
 	} else {
-		if (gc_work->next_gc_run < GC_INTERVAL_MAX)
-			gc_work->next_gc_run += msecs_to_jiffies(1);
+		unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;

-		next_run = gc_work->next_gc_run;
+		BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
+
+		gc_work->next_gc_run += min_interval;
+		if (gc_work->next_gc_run > max)
+			gc_work->next_gc_run = max;
 	}

+	next_run = gc_work->next_gc_run;
 	gc_work->last_bucket = i;
 	queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
 }
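
For reference, the heuristic this hunk installs is a linear ramp with a reset: every quiet cycle adds min_interval to next_gc_run up to the cap, while a cycle in which more than GC_EVICT_RATIO percent of the scanned entries had expired drops it back to min_interval. A standalone simulation sketch (HZ=1000 and the per-cycle ratios are illustrative assumptions; only the if/else mirrors the patch):

/* Standalone simulation of the next_gc_run heuristic; not kernel code. */
#include <stdio.h>

#define HZ			1000u	/* assumed tick rate */
#define GC_MAX_BUCKETS_DIV	128u
#define GC_MAX_SCAN_JIFFIES	(16u * HZ)
#define GC_EVICT_RATIO		50u

int main(void)
{
	unsigned int min_interval = HZ / GC_MAX_BUCKETS_DIV;	/* 7 jiffies */
	unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV; /* 125 */
	unsigned int next_gc_run = HZ;	/* initial value, as in _init() */
	/* made-up eviction ratios observed in successive gc cycles */
	unsigned int ratio[] = { 0, 0, 0, 80, 0, 0 };
	unsigned int i;

	for (i = 0; i < sizeof(ratio) / sizeof(ratio[0]); i++) {
		if (ratio[i] > GC_EVICT_RATIO) {
			next_gc_run = min_interval;	/* rescan quickly */
		} else {
			next_gc_run += min_interval;	/* back off linearly */
			if (next_gc_run > max)
				next_gc_run = max;	/* clamp at ceiling */
		}
		printf("cycle %u: ratio=%u%% -> next run in %u jiffies\n",
		       i, ratio[i], next_gc_run);
	}
	return 0;
}

Under the old scheme any eviction halved next_gc_run and forced a 1 ms rescan; the ramp instead reacts only when more than half of the scanned entries had expired, and otherwise backs off in fixed min_interval steps.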
@@ -1022,7 +1023,7 @@ static void gc_worker(struct work_struct *work)
 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
 {
 	INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
-	gc_work->next_gc_run = GC_INTERVAL_MAX;
+	gc_work->next_gc_run = HZ;
 	gc_work->exiting = false;
 }

@@ -1915,7 +1916,7 @@ int nf_conntrack_init_start(void)
 	nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);

 	conntrack_gc_work_init(&conntrack_gc_work);
-	queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX);
+	queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);

 	return 0;