Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 63dadcf7 authored by Suren Baghdasaryan
Browse files

lmkd: Implement kill timeout



(cherry picked from commit caa2dc56)

The new ro.lmk.kill_timeout_ms property defines a timeout in ms that
must elapse after a successful kill cycle before further kills are
considered. This is necessary because memory pressure does not drop
instantly after a kill and the system needs time to reflect the new
memory state. The timeout prevents extra kills in the period immediately
after a kill cycle. By default it is set to 0, which disables this feature.

Bug: 63631020
Test: alloc-stress

Change-Id: Ia847118c8c4a659a7fc38cd5cd0042acb514ae28
Merged-In: Ia847118c8c4a659a7fc38cd5cd0042acb514ae28
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
parent aa73bafe
Loading
Loading
Loading
Loading
+30 −0
Original line number Diff line number Diff line
@@ -120,6 +120,7 @@ static int64_t upgrade_pressure;
static int64_t downgrade_pressure;
static bool is_go_device;
static bool kill_heaviest_task;
static unsigned long kill_timeout_ms;

/* control socket listen and data */
static int ctrl_lfd;
@@ -795,6 +796,12 @@ enum vmpressure_level downgrade_level(enum vmpressure_level level) {
        level - 1 : level);
}

/*
 * Return the number of milliseconds elapsed between two timestamps,
 * where "from" is the earlier time and "to" the later one.
 */
static inline unsigned long get_time_diff_ms(struct timeval *from,
                                             struct timeval *to) {
    long sec_delta = to->tv_sec - from->tv_sec;
    long usec_delta = to->tv_usec - from->tv_usec;
    /* usec_delta may be negative; the sum still yields the exact diff. */
    return sec_delta * 1000 + usec_delta / 1000;
}

static void mp_event_common(enum vmpressure_level level) {
    int ret;
    unsigned long long evcount;
@@ -802,6 +809,8 @@ static void mp_event_common(enum vmpressure_level level) {
    int64_t mem_pressure;
    enum vmpressure_level lvl;
    struct mem_size free_mem;
    static struct timeval last_report_tm;
    static unsigned long skip_count = 0;

    /*
     * Check all event counters from low to critical
@@ -816,6 +825,23 @@ static void mp_event_common(enum vmpressure_level level) {
        }
    }

    if (kill_timeout_ms) {
        struct timeval curr_tm;
        gettimeofday(&curr_tm, NULL);
        if (get_time_diff_ms(&last_report_tm, &curr_tm) < kill_timeout_ms) {
            skip_count++;
            return;
        }
    }

    if (skip_count > 0) {
        if (debug_process_killing) {
            ALOGI("%lu memory pressure events were skipped after a kill!",
                skip_count);
        }
        skip_count = 0;
    }

    if (get_free_memory(&free_mem) == 0) {
        if (level == VMPRESS_LEVEL_LOW) {
            record_low_pressure_levels(&free_mem);
@@ -894,6 +920,8 @@ do_kill:
                    ALOGI("Unable to free enough memory (pages freed=%d)",
                        pages_freed);
                }
            } else {
                gettimeofday(&last_report_tm, NULL);
            }
        }
    }
@@ -1081,6 +1109,8 @@ int main(int argc __unused, char **argv __unused) {
    kill_heaviest_task =
        property_get_bool("ro.lmk.kill_heaviest_task", true);
    is_go_device = property_get_bool("ro.config.low_ram", false);
    kill_timeout_ms =
        (unsigned long)property_get_int32("ro.lmk.kill_timeout_ms", 0);

    // MCL_ONFAULT pins pages as they fault instead of loading
    // everything immediately all at once. (Which would be bad,