
Commit 5fdd8207 authored by Tim Murray, committed by android-build-merger

Merge "Revert "Revert "DispSync: Always resync after inactivity""" into nyc-dev am: 6328134d am: 11e3dd84
am: 6d3bfe4f

* commit '6d3bfe4f':
  Revert "Revert "DispSync: Always resync after inactivity""

Change-Id: Ic0c65f03aa58e4142a411f808efbf90e7d10c133
parents 29619417 6d3bfe4f
services/surfaceflinger/DispSync.cpp  +148 −39
@@ -15,6 +15,7 @@
 */

#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+//#define LOG_NDEBUG 0

// This is needed for stdint.h to define INT64_MAX in C++
#define __STDC_LIMIT_MACROS
@@ -33,12 +34,21 @@
#include "DispSync.h"
#include "EventLog/EventLog.h"

+#include <algorithm>
+
+using std::max;
+using std::min;
+
namespace android {

// Setting this to true enables verbose tracing that can be used to debug
// vsync event model or phase issues.
static const bool kTraceDetailedInfo = false;

+// Setting this to true adds a zero-phase tracer for correlating with hardware
+// vsync events
+static const bool kEnableZeroPhaseTracer = false;
+
// This is the threshold used to determine when hardware vsync events are
// needed to re-synchronize the software vsync model with the hardware.  The
// error metric used is the mean of the squared difference between each
@@ -49,28 +59,36 @@ static const nsecs_t kErrorThreshold = 160000000000; // 400 usec squared
// vsync event.
static const int64_t kPresentTimeOffset = PRESENT_TIME_OFFSET_FROM_VSYNC_NS;

+#undef LOG_TAG
+#define LOG_TAG "DispSyncThread"
class DispSyncThread: public Thread {
public:

-    DispSyncThread():
+    DispSyncThread(const char* name):
+            mName(name),
            mStop(false),
            mPeriod(0),
            mPhase(0),
            mReferenceTime(0),
-            mWakeupLatency(0) {
-    }
+            mWakeupLatency(0),
+            mFrameNumber(0) {}

    virtual ~DispSyncThread() {}

    void updateModel(nsecs_t period, nsecs_t phase, nsecs_t referenceTime) {
+        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);
        mPeriod = period;
        mPhase = phase;
        mReferenceTime = referenceTime;
+        ALOGV("[%s] updateModel: mPeriod = %" PRId64 ", mPhase = %" PRId64
+                " mReferenceTime = %" PRId64, mName, ns2us(mPeriod),
+                ns2us(mPhase), ns2us(mReferenceTime));
        mCond.signal();
    }

    void stop() {
+        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);
        mStop = true;
        mCond.signal();
@@ -89,6 +107,12 @@ public:
            { // Scope for lock
                Mutex::Autolock lock(mMutex);

+                if (kTraceDetailedInfo) {
+                    ATRACE_INT64("DispSync:Frame", mFrameNumber);
+                }
+                ALOGV("[%s] Frame %" PRId64, mName, mFrameNumber);
+                ++mFrameNumber;
+
                if (mStop) {
                    return false;
                }
@@ -109,6 +133,9 @@ public:
                bool isWakeup = false;

                if (now < targetTime) {
+                    ALOGV("[%s] Waiting until %" PRId64, mName,
+                            ns2us(targetTime));
+                    if (kTraceDetailedInfo) ATRACE_NAME("DispSync waiting");
                    err = mCond.waitRelative(mMutex, targetTime - now);

                    if (err == TIMED_OUT) {
@@ -122,15 +149,15 @@ public:

                now = systemTime(SYSTEM_TIME_MONOTONIC);

+                // Don't correct by more than 1.5 ms
+                static const nsecs_t kMaxWakeupLatency = us2ns(1500);
+
                if (isWakeup) {
                    mWakeupLatency = ((mWakeupLatency * 63) +
                            (now - targetTime)) / 64;
-                    if (mWakeupLatency > 500000) {
-                        // Don't correct by more than 500 us
-                        mWakeupLatency = 500000;
-                    }
+                    mWakeupLatency = min(mWakeupLatency, kMaxWakeupLatency);
                    if (kTraceDetailedInfo) {
-                        ATRACE_INT64("DispSync:WakeupLat", now - nextEventTime);
+                        ATRACE_INT64("DispSync:WakeupLat", now - targetTime);
                        ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency);
                    }
                }
@@ -146,7 +173,9 @@ public:
        return false;
    }

-    status_t addEventListener(nsecs_t phase, const sp<DispSync::Callback>& callback) {
+    status_t addEventListener(const char* name, nsecs_t phase,
+            const sp<DispSync::Callback>& callback) {
+        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
@@ -156,15 +185,14 @@ public:
        }

        EventListener listener;
+        listener.mName = name;
        listener.mPhase = phase;
        listener.mCallback = callback;

        // We want to allow the firstmost future event to fire without
-        // allowing any past events to fire.  Because
-        // computeListenerNextEventTimeLocked filters out events within a half
-        // a period of the last event time, we need to initialize the last
-        // event time to a half a period in the past.
-        listener.mLastEventTime = systemTime(SYSTEM_TIME_MONOTONIC) - mPeriod / 2;
+        // allowing any past events to fire
+        listener.mLastEventTime = systemTime() - mPeriod / 2 + mPhase -
+                mWakeupLatency;

        mEventListeners.push(listener);

@@ -174,6 +202,7 @@ public:
    }

    status_t removeEventListener(const sp<DispSync::Callback>& callback) {
+        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
@@ -189,6 +218,7 @@ public:

    // This method is only here to handle the kIgnorePresentFences case.
    bool hasAnyEventListeners() {
+        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);
        return !mEventListeners.empty();
    }
@@ -196,6 +226,7 @@ public:
private:

    struct EventListener {
+        const char* mName;
        nsecs_t mPhase;
        nsecs_t mLastEventTime;
        sp<DispSync::Callback> mCallback;
@@ -207,6 +238,8 @@ private:
    };

    nsecs_t computeNextEventTimeLocked(nsecs_t now) {
+        if (kTraceDetailedInfo) ATRACE_CALL();
+        ALOGV("[%s] computeNextEventTimeLocked", mName);
        nsecs_t nextEventTime = INT64_MAX;
        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
@@ -217,21 +250,28 @@ private:
            }
        }

+        ALOGV("[%s] nextEventTime = %" PRId64, mName, ns2us(nextEventTime));
        return nextEventTime;
    }

    Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
+        if (kTraceDetailedInfo) ATRACE_CALL();
+        ALOGV("[%s] gatherCallbackInvocationsLocked @ %" PRId64, mName,
+                ns2us(now));
+
        Vector<CallbackInvocation> callbackInvocations;
-        nsecs_t ref = now - mPeriod;
+        nsecs_t onePeriodAgo = now - mPeriod;

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
-                    ref);
+                    onePeriodAgo);

            if (t < now) {
                CallbackInvocation ci;
                ci.mCallback = mEventListeners[i].mCallback;
                ci.mEventTime = t;
+                ALOGV("[%s] [%s] Preparing to fire", mName,
+                        mEventListeners[i].mName);
                callbackInvocations.push(ci);
                mEventListeners.editItemAt(i).mLastEventTime = t;
            }
@@ -241,29 +281,67 @@ private:
    }

    nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener,
-            nsecs_t ref) {
-
-        nsecs_t lastEventTime = listener.mLastEventTime;
-        if (ref < lastEventTime) {
-            ref = lastEventTime;
-        }
-
-        nsecs_t phase = mReferenceTime + mPhase + listener.mPhase;
-        nsecs_t t = (((ref - phase) / mPeriod) + 1) * mPeriod + phase;
-
-        if (t - listener.mLastEventTime < mPeriod / 2) {
+            nsecs_t baseTime) {
+        if (kTraceDetailedInfo) ATRACE_CALL();
+        ALOGV("[%s] [%s] computeListenerNextEventTimeLocked(%" PRId64 ")",
+                mName, listener.mName, ns2us(baseTime));
+
+        nsecs_t lastEventTime = listener.mLastEventTime + mWakeupLatency;
+        ALOGV("[%s] lastEventTime: %" PRId64, mName, ns2us(lastEventTime));
+        if (baseTime < lastEventTime) {
+            baseTime = lastEventTime;
+            ALOGV("[%s] Clamping baseTime to lastEventTime -> %" PRId64, mName,
+                    ns2us(baseTime));
+        }
+
+        baseTime -= mReferenceTime;
+        ALOGV("[%s] Relative baseTime = %" PRId64, mName, ns2us(baseTime));
+        nsecs_t phase = mPhase + listener.mPhase;
+        ALOGV("[%s] Phase = %" PRId64, mName, ns2us(phase));
+        baseTime -= phase;
+        ALOGV("[%s] baseTime - phase = %" PRId64, mName, ns2us(baseTime));
+
+        // If our previous time is before the reference (because the reference
+        // has since been updated), the division by mPeriod will truncate
+        // towards zero instead of computing the floor. Since in all cases
+        // before the reference we want the next time to be effectively now, we
+        // set baseTime to -mPeriod so that numPeriods will be -1.
+        // When we add 1 and the phase, we will be at the correct event time for
+        // this period.
+        if (baseTime < 0) {
+            ALOGV("[%s] Correcting negative baseTime", mName);
+            baseTime = -mPeriod;
+        }
+
+        nsecs_t numPeriods = baseTime / mPeriod;
+        ALOGV("[%s] numPeriods = %" PRId64, mName, numPeriods);
+        nsecs_t t = (numPeriods + 1) * mPeriod + phase;
+        ALOGV("[%s] t = %" PRId64, mName, ns2us(t));
+        t += mReferenceTime;
+        ALOGV("[%s] Absolute t = %" PRId64, mName, ns2us(t));
+
+        // Check that it's been slightly more than half a period since the last
+        // event so that we don't accidentally fall into double-rate vsyncs
+        if (t - listener.mLastEventTime < (3 * mPeriod / 5)) {
            t += mPeriod;
+            ALOGV("[%s] Modifying t -> %" PRId64, mName, ns2us(t));
        }

+        t -= mWakeupLatency;
+        ALOGV("[%s] Corrected for wakeup latency -> %" PRId64, mName, ns2us(t));
+
        return t;
    }

    void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) {
+        if (kTraceDetailedInfo) ATRACE_CALL();
        for (size_t i = 0; i < callbacks.size(); i++) {
            callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
        }
    }

+    const char* const mName;
+
    bool mStop;

    nsecs_t mPeriod;
@@ -271,12 +349,17 @@ private:
    nsecs_t mReferenceTime;
    nsecs_t mWakeupLatency;

+    int64_t mFrameNumber;
+
    Vector<EventListener> mEventListeners;

    Mutex mMutex;
    Condition mCond;
};

+#undef LOG_TAG
+#define LOG_TAG "DispSync"
+
class ZeroPhaseTracer : public DispSync::Callback {
public:
    ZeroPhaseTracer() : mParity(false) {}
@@ -290,9 +373,10 @@ private:
    bool mParity;
};

-DispSync::DispSync() :
+DispSync::DispSync(const char* name) :
+        mName(name),
        mRefreshSkipCount(0),
-        mThread(new DispSyncThread()) {
+        mThread(new DispSyncThread(name)) {

    mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE);

@@ -305,8 +389,8 @@ DispSync::DispSync() :
        // Even if we're just ignoring the fences, the zero-phase tracing is
        // not needed because any time there is an event registered we will
        // turn on the HW vsync events.
-        if (!kIgnorePresentFences) {
-            addEventListener(0, new ZeroPhaseTracer());
+        if (!kIgnorePresentFences && kEnableZeroPhaseTracer) {
+            addEventListener("ZeroPhaseTracer", 0, new ZeroPhaseTracer());
        }
    }
}
@@ -351,7 +435,7 @@ bool DispSync::addPresentFence(const sp<Fence>& fence) {

void DispSync::beginResync() {
    Mutex::Autolock lock(mMutex);
-
+    ALOGV("[%s] beginResync", mName);
    mModelUpdated = false;
    mNumResyncSamples = 0;
}
@@ -359,11 +443,17 @@ void DispSync::beginResync() {
bool DispSync::addResyncSample(nsecs_t timestamp) {
    Mutex::Autolock lock(mMutex);

+    ALOGV("[%s] addResyncSample(%" PRId64 ")", mName, ns2us(timestamp));
+
    size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
    mResyncSamples[idx] = timestamp;
    if (mNumResyncSamples == 0) {
        mPhase = 0;
        mReferenceTime = timestamp;
+        ALOGV("[%s] First resync sample: mPeriod = %" PRId64 ", mPhase = 0, "
+                "mReferenceTime = %" PRId64, mName, ns2us(mPeriod),
+                ns2us(mReferenceTime));
+        mThread->updateModel(mPeriod, mPhase, mReferenceTime);
    }

    if (mNumResyncSamples < MAX_RESYNC_SAMPLES) {
@@ -387,17 +477,21 @@ bool DispSync::addResyncSample(nsecs_t timestamp) {
        return mThread->hasAnyEventListeners();
    }

-    return !mModelUpdated || mError > kErrorThreshold;
+    // Check against kErrorThreshold / 2 to add some hysteresis before having to
+    // resync again
+    bool modelLocked = mModelUpdated && mError < (kErrorThreshold / 2);
+    ALOGV("[%s] addResyncSample returning %s", mName,
+            modelLocked ? "locked" : "unlocked");
+    return !modelLocked;
}

void DispSync::endResync() {
}

-status_t DispSync::addEventListener(nsecs_t phase,
+status_t DispSync::addEventListener(const char* name, nsecs_t phase,
        const sp<Callback>& callback) {
-
    Mutex::Autolock lock(mMutex);
-    return mThread->addEventListener(phase, callback);
+    return mThread->addEventListener(name, phase, callback);
}

void DispSync::setRefreshSkipCount(int count) {
@@ -427,20 +521,32 @@ nsecs_t DispSync::getPeriod() {
}

void DispSync::updateModelLocked() {
+    ALOGV("[%s] updateModelLocked %zu", mName, mNumResyncSamples);
    if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) {
+        ALOGV("[%s] Computing...", mName);
        nsecs_t durationSum = 0;
+        nsecs_t minDuration = INT64_MAX;
+        nsecs_t maxDuration = 0;
        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES;
-            durationSum += mResyncSamples[idx] - mResyncSamples[prev];
+            nsecs_t duration = mResyncSamples[idx] - mResyncSamples[prev];
+            durationSum += duration;
+            minDuration = min(minDuration, duration);
+            maxDuration = max(maxDuration, duration);
        }

-        mPeriod = durationSum / (mNumResyncSamples - 1);
+        // Exclude the min and max from the average
+        durationSum -= minDuration + maxDuration;
+        mPeriod = durationSum / (mNumResyncSamples - 3);
+
+        ALOGV("[%s] mPeriod = %" PRId64, mName, ns2us(mPeriod));

        double sampleAvgX = 0;
        double sampleAvgY = 0;
        double scale = 2.0 * M_PI / double(mPeriod);
-        for (size_t i = 0; i < mNumResyncSamples; i++) {
+        // Intentionally skip the first sample
+        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            nsecs_t sample = mResyncSamples[idx] - mReferenceTime;
            double samplePhase = double(sample % mPeriod) * scale;
@@ -448,18 +554,21 @@ void DispSync::updateModelLocked() {
            sampleAvgY += sin(samplePhase);
        }

-        sampleAvgX /= double(mNumResyncSamples);
-        sampleAvgY /= double(mNumResyncSamples);
+        sampleAvgX /= double(mNumResyncSamples - 1);
+        sampleAvgY /= double(mNumResyncSamples - 1);

        mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale);

-        if (mPhase < 0) {
+        ALOGV("[%s] mPhase = %" PRId64, mName, ns2us(mPhase));
+
+        if (mPhase < -(mPeriod / 2)) {
            mPhase += mPeriod;
+            ALOGV("[%s] Adjusting mPhase -> %" PRId64, mName, ns2us(mPhase));
        }

        if (kTraceDetailedInfo) {
            ATRACE_INT64("DispSync:Period", mPeriod);
-            ATRACE_INT64("DispSync:Phase", mPhase);
+            ATRACE_INT64("DispSync:Phase", mPhase + mPeriod / 2);
        }

        // Artificially inflate the period if requested.
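
A note on the trickiest part of the rewritten computeListenerNextEventTimeLocked above: C++ integer division truncates toward zero, so for a baseTime earlier than the reference time the division would not act as a floor, and the next event would land one full period late. A minimal standalone sketch of that effect (illustration only, not code from this commit; the period value is assumed and the phase terms are omitted):

    // Illustration only: why computeListenerNextEventTimeLocked clamps a
    // negative baseTime to -mPeriod before dividing.
    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t period = 16666667;  // ~60 Hz vsync period in ns (assumed)
        int64_t baseTime = -5000000;      // 5 ms before the reference time

        // Truncating division: -5000000 / 16666667 == 0, not -1, so the
        // "next" event would be scheduled a full period after the reference.
        int64_t truncated = (baseTime / period + 1) * period;

        // The commit's correction: force numPeriods to -1 so the next event
        // lands at the reference time itself.
        if (baseTime < 0) baseTime = -period;
        int64_t corrected = (baseTime / period + 1) * period;

        std::printf("truncated=%lld corrected=%lld\n",
                static_cast<long long>(truncated),
                static_cast<long long>(corrected));
        return 0;  // prints truncated=16666667 corrected=0
    }
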
services/surfaceflinger/DispSync.h  +8 −8
@@ -26,11 +26,8 @@
namespace android {

// Ignore present (retire) fences if the device doesn't have support for the
-// sync framework, or if all phase offsets are zero.  The latter is useful
-// because it allows us to avoid resync bursts on devices that don't need
-// phase-offset VSYNC events.
-#if defined(RUNNING_WITHOUT_SYNC_FRAMEWORK) || \
-        (VSYNC_EVENT_PHASE_OFFSET_NS == 0 && SF_VSYNC_EVENT_PHASE_OFFSET_NS == 0)
+// sync framework
+#if defined(RUNNING_WITHOUT_SYNC_FRAMEWORK)
static const bool kIgnorePresentFences = true;
#else
static const bool kIgnorePresentFences = false;
@@ -64,7 +61,7 @@ public:
        virtual void onDispSyncEvent(nsecs_t when) = 0;
    };

-    DispSync();
+    DispSync(const char* name);
    ~DispSync();

    // reset clears the resync samples and error value.
@@ -114,7 +111,8 @@ public:
    // given phase offset from the hardware vsync events.  The callback is
    // called from a separate thread and it should return reasonably quickly
    // (i.e. within a few hundred microseconds).
-    status_t addEventListener(nsecs_t phase, const sp<Callback>& callback);
+    status_t addEventListener(const char* name, nsecs_t phase,
+            const sp<Callback>& callback);

    // removeEventListener removes an already-registered event callback.  Once
    // this method returns that callback will no longer be called by the
@@ -137,10 +135,12 @@ private:
    void resetErrorLocked();

    enum { MAX_RESYNC_SAMPLES = 32 };
-    enum { MIN_RESYNC_SAMPLES_FOR_UPDATE = 3 };
+    enum { MIN_RESYNC_SAMPLES_FOR_UPDATE = 6 };
    enum { NUM_PRESENT_SAMPLES = 8 };
    enum { MAX_RESYNC_SAMPLES_WITHOUT_PRESENT = 4 };

+    const char* const mName;
+
    // mPeriod is the computed period of the modeled vsync events in
    // nanoseconds.
    nsecs_t mPeriod;
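
The bump of MIN_RESYNC_SAMPLES_FOR_UPDATE from 3 to 6 pairs with the new period estimate in updateModelLocked (previous file), which now drops the shortest and longest inter-sample interval before averaging, so enough intervals must survive the trimming. A standalone sketch of that trimmed mean (illustration only; the sample timestamps are invented):

    // Illustration only: N timestamps give N - 1 intervals; dropping the min
    // and max leaves N - 3 to average, which suppresses a single outlier.
    #include <algorithm>
    #include <cstdio>

    int main() {
        const long long samples[] =  // resync timestamps in ns (invented)
                {0, 16666000, 33334000, 51000000, 66667000, 83333000};
        const int n = 6;

        long long durationSum = 0;
        long long minDuration = samples[1] - samples[0];
        long long maxDuration = minDuration;
        for (int i = 1; i < n; i++) {
            long long duration = samples[i] - samples[i - 1];
            durationSum += duration;
            minDuration = std::min(minDuration, duration);
            maxDuration = std::max(maxDuration, duration);
        }

        long long naive = durationSum / (n - 1);
        long long trimmed = (durationSum - minDuration - maxDuration) / (n - 3);
        // The late fourth sample skews the naive mean; the trimmed mean stays
        // at the true ~16.67 ms period.
        std::printf("naive=%lld trimmed=%lld\n", naive, trimmed);
        return 0;  // prints naive=16666600 trimmed=16666666
    }
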
services/surfaceflinger/EventThread.cpp  +5 −1
@@ -44,8 +44,9 @@ static void vsyncOffCallback(union sigval val) {
    return;
}

-EventThread::EventThread(const sp<VSyncSource>& src)
+EventThread::EventThread(const sp<VSyncSource>& src, SurfaceFlinger& flinger)
    : mVSyncSource(src),
+      mFlinger(flinger),
      mUseSoftwareVSync(false),
      mVsyncEnabled(false),
      mDebugVsyncEnabled(false),
@@ -126,6 +127,9 @@ void EventThread::setVsyncRate(uint32_t count,
void EventThread::requestNextVsync(
        const sp<EventThread::Connection>& connection) {
    Mutex::Autolock _l(mLock);
+
+    mFlinger.resyncWithRateLimit();
+
    if (connection->count < 0) {
        connection->count = 0;
        mCondition.broadcast();
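
This call is the heart of "always resync after inactivity": every requestNextVsync now nudges SurfaceFlinger, which rate-limits the actual hardware resync (resyncWithRateLimit is defined in the SurfaceFlinger.cpp hunk below). A simplified sketch of the pattern, using std::chrono in place of systemTime (illustration only; the names are assumed):

    // Illustration only: resync to hardware vsync on a vsync request, but
    // skip it while buffers have been swapped within the last 500 ms.
    #include <chrono>
    #include <cstdio>

    using Clock = std::chrono::steady_clock;

    struct RateLimitedResync {
        Clock::time_point lastSwapTime = Clock::now();

        void onVsyncRequested() {
            const auto kIgnoreDelay = std::chrono::milliseconds(500);
            if (Clock::now() - lastSwapTime > kIgnoreDelay) {
                // Display has been idle: the model may have drifted, so
                // resample hardware vsync before honoring the request.
                std::puts("resyncing to hardware vsync");
            } else {
                std::puts("recently active; trusting the current model");
            }
        }
    };

    int main() {
        RateLimitedResync r;
        r.onVsyncRequested();  // right after a swap: resync is skipped
        return 0;
    }
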
services/surfaceflinger/EventThread.h  +2 −1
@@ -77,7 +77,7 @@ class EventThread : public Thread, private VSyncSource::Callback {

public:

-    EventThread(const sp<VSyncSource>& src);
+    EventThread(const sp<VSyncSource>& src, SurfaceFlinger& flinger);

    sp<Connection> createEventConnection() const;
    status_t registerDisplayEventConnection(const sp<Connection>& connection);
@@ -116,6 +116,7 @@ private:
    // constants
    sp<VSyncSource> mVSyncSource;
    PowerHAL mPowerHAL;
+    SurfaceFlinger& mFlinger;

    mutable Mutex mLock;
    mutable Condition mCondition;
services/surfaceflinger/SurfaceFlinger.cpp  +18 −7
@@ -149,6 +149,7 @@ SurfaceFlinger::SurfaceFlinger()
        mLastTransactionTime(0),
        mBootFinished(false),
        mForceFullDamage(false),
+        mPrimaryDispSync("PrimaryDispSync"),
        mPrimaryHWVsyncEnabled(false),
        mHWVsyncAvailable(false),
        mDaltonize(false),
@@ -331,11 +332,12 @@ void SurfaceFlinger::deleteTextureAsync(uint32_t texture) {
class DispSyncSource : public VSyncSource, private DispSync::Callback {
public:
    DispSyncSource(DispSync* dispSync, nsecs_t phaseOffset, bool traceVsync,
-        const char* label) :
+        const char* name) :
+            mName(name),
            mValue(0),
            mTraceVsync(traceVsync),
-            mVsyncOnLabel(String8::format("VsyncOn-%s", label)),
-            mVsyncEventLabel(String8::format("VSYNC-%s", label)),
+            mVsyncOnLabel(String8::format("VsyncOn-%s", name)),
+            mVsyncEventLabel(String8::format("VSYNC-%s", name)),
            mDispSync(dispSync),
            mCallbackMutex(),
            mCallback(),
@@ -348,7 +350,7 @@ public:
    virtual void setVSyncEnabled(bool enable) {
        Mutex::Autolock lock(mVsyncMutex);
        if (enable) {
-            status_t err = mDispSync->addEventListener(mPhaseOffset,
+            status_t err = mDispSync->addEventListener(mName, mPhaseOffset,
                    static_cast<DispSync::Callback*>(this));
            if (err != NO_ERROR) {
                ALOGE("error registering vsync callback: %s (%d)",
@@ -399,7 +401,7 @@ public:
        }

        // Add a listener with the new offset
-        err = mDispSync->addEventListener(mPhaseOffset,
+        err = mDispSync->addEventListener(mName, mPhaseOffset,
                static_cast<DispSync::Callback*>(this));
        if (err != NO_ERROR) {
            ALOGE("error registering vsync callback: %s (%d)",
@@ -425,6 +427,8 @@ private:
        }
    }

+    const char* const mName;
+
    int mValue;

    const bool mTraceVsync;
@@ -455,10 +459,10 @@ void SurfaceFlinger::init() {
        // start the EventThread
        sp<VSyncSource> vsyncSrc = new DispSyncSource(&mPrimaryDispSync,
                vsyncPhaseOffsetNs, true, "app");
-        mEventThread = new EventThread(vsyncSrc);
+        mEventThread = new EventThread(vsyncSrc, *this);
        sp<VSyncSource> sfVsyncSrc = new DispSyncSource(&mPrimaryDispSync,
                sfVsyncPhaseOffsetNs, true, "sf");
-        mSFEventThread = new EventThread(sfVsyncSrc);
+        mSFEventThread = new EventThread(sfVsyncSrc, *this);
        mEventQueue.setEventThread(mSFEventThread);

        // Get a RenderEngine for the given display / config (can't fail)
@@ -827,6 +831,13 @@ void SurfaceFlinger::disableHardwareVsync(bool makeUnavailable) {
    }
}

+void SurfaceFlinger::resyncWithRateLimit() {
+    static constexpr nsecs_t kIgnoreDelay = ms2ns(500);
+    if (systemTime() - mLastSwapTime > kIgnoreDelay) {
+        resyncToHardwareVsync(true);
+    }
+}
+
void SurfaceFlinger::onVSyncReceived(int32_t type, nsecs_t timestamp) {
    bool needsHwVsync = false;
