Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5facfb12 authored by Ady Abraham's avatar Ady Abraham
Browse files

SurfaceFlinger: expected present time directly from VSyncReactor

When SurfaceFlinger calculates the vsync time, it does so by asking
VSyncReactor for the next anticipated vsync after the wake-up time.
To handle negative offsets (when SF wakes up more than a vsync period
before a vsync event), SF adds a vsync period to that anticipated
vsync.

This creates a race condition where the offset used at the time of
invalidate may be different from the offset VSyncReactor used when it
calculated the vsync at the time of requestNextVsync().
To fix that, we plumb the originally calculated vsync time through
to SurfaceFlinger.

Bug: 154303002
Test: Run app transitions and collect systrace
Change-Id: I3f2670c7b0ecb52a85fb07df6d360694b51d5d66
parent a0bb096d
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -65,6 +65,7 @@ public:

        // Payload for a vsync event delivered to event consumers.
        struct VSync {
            // Running event count; presumably increments per delivered vsync — confirm with producer.
            uint32_t count;
            // Expected present time for this vsync (CLOCK_MONOTONIC nsecs),
            // plumbed through from the vsync model per this change.
            nsecs_t expectedVSyncTimestamp;
        };

        struct Hotplug {
+1 −1
Original line number Diff line number Diff line
@@ -130,7 +130,7 @@ private:
        mVsyncListening = false;
    }

    void onDispSyncEvent(nsecs_t /* when */) final {
    void onDispSyncEvent(nsecs_t /*when*/, nsecs_t /*expectedVSyncTimestamp*/) final {
        std::unique_lock<decltype(mMutex)> lock(mMutex);

        if (mPhaseIntervalSetting == Phase::ZERO) {
+27 −6
Original line number Diff line number Diff line
@@ -200,7 +200,8 @@ public:
                    }
                }

                callbackInvocations = gatherCallbackInvocationsLocked(now);
                callbackInvocations =
                        gatherCallbackInvocationsLocked(now, computeNextRefreshLocked(0, now));
            }

            if (callbackInvocations.size() > 0) {
@@ -303,6 +304,11 @@ public:
        return BAD_VALUE;
    }

    // Thread-safe wrapper: acquires mMutex and delegates to the locked variant.
    nsecs_t computeNextRefresh(int periodOffset, nsecs_t now) const {
        Mutex::Autolock autoLock(mMutex);
        const nsecs_t nextRefresh = computeNextRefreshLocked(periodOffset, now);
        return nextRefresh;
    }

private:
    struct EventListener {
        const char* mName;
@@ -315,6 +321,7 @@ private:
    // A pending callback dispatch gathered under the lock and fired outside it.
    struct CallbackInvocation {
        DispSync::Callback* mCallback; // listener to invoke; non-owning
        nsecs_t mEventTime;            // time this event fires (listener phase applied)
        // Expected vsync/present time passed alongside the event; a period is
        // added for negative-phase listeners (see gatherCallbackInvocationsLocked).
        nsecs_t mExpectedVSyncTime;
    };

    nsecs_t computeNextEventTimeLocked(nsecs_t now) {
@@ -340,7 +347,8 @@ private:
        return duration < (3 * mPeriod) / 5;
    }

    std::vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
    std::vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now,
                                                                    nsecs_t expectedVSyncTime) {
        if (mTraceDetailedInfo) ATRACE_CALL();
        ALOGV("[%s] gatherCallbackInvocationsLocked @ %" PRId64, mName, ns2us(now));

@@ -361,6 +369,10 @@ private:
                CallbackInvocation ci;
                ci.mCallback = eventListener.mCallback;
                ci.mEventTime = t;
                ci.mExpectedVSyncTime = expectedVSyncTime;
                if (eventListener.mPhase < 0) {
                    ci.mExpectedVSyncTime += mPeriod;
                }
                ALOGV("[%s] [%s] Preparing to fire, latency: %" PRId64, mName, eventListener.mName,
                      t - eventListener.mLastEventTime);
                callbackInvocations.push_back(ci);
@@ -426,8 +438,17 @@ private:
    void fireCallbackInvocations(const std::vector<CallbackInvocation>& callbacks) {
        if (mTraceDetailedInfo) ATRACE_CALL();
        for (size_t i = 0; i < callbacks.size(); i++) {
            callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
            callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime,
                                                    callbacks[i].mExpectedVSyncTime);
        }
    }

    // Predicts the CLOCK_MONOTONIC time of the vsync `periodOffset` periods
    // after the next one following `now`, using the current model
    // (mReferenceTime, mPhase, mPeriod). Returns 0 when no period has been
    // established yet. Caller must hold mMutex.
    nsecs_t computeNextRefreshLocked(int periodOffset, nsecs_t now) const {
        if (mPeriod == 0) {
            return 0;
        }
        const nsecs_t anchor = mReferenceTime + mPhase;
        const nsecs_t elapsedPeriods = (now - anchor) / mPeriod;
        return (elapsedPeriods + periodOffset + 1) * mPeriod + anchor;
    }

    const char* const mName;
@@ -444,7 +465,7 @@ private:

    std::vector<EventListener> mEventListeners;

    Mutex mMutex;
    mutable Mutex mMutex;
    Condition mCond;

    // Flag to turn on logging in systrace.
@@ -458,7 +479,7 @@ class ZeroPhaseTracer : public DispSync::Callback {
public:
    ZeroPhaseTracer() : mParity("ZERO_PHASE_VSYNC", false) {}

    // Flips the "ZERO_PHASE_VSYNC" systrace counter on every vsync so the
    // trace renders a square wave aligned with zero-phase events.
    // NOTE: the original span retained the stale pre-patch one-argument
    // signature alongside the updated two-argument one (diff residue); only
    // the post-patch two-argument override is kept, matching the updated
    // DispSync::Callback interface.
    virtual void onDispSyncEvent(nsecs_t /*when*/, nsecs_t /*expectedVSyncTimestamp*/) {
        mParity = !mParity;
    }

@@ -845,7 +866,7 @@ nsecs_t DispSync::expectedPresentTime(nsecs_t now) {
    const uint32_t hwcLatency = 0;

    // Ask DispSync when the next refresh will be (CLOCK_MONOTONIC).
    return computeNextRefresh(hwcLatency, now);
    return mThread->computeNextRefresh(hwcLatency, now);
}

} // namespace impl
+1 −1
Original line number Diff line number Diff line
@@ -36,7 +36,7 @@ public:
    public:
        Callback() = default;
        virtual ~Callback();
        virtual void onDispSyncEvent(nsecs_t when) = 0;
        virtual void onDispSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp) = 0;

    protected:
        Callback(Callback const&) = delete;
+2 −2
Original line number Diff line number Diff line
@@ -92,7 +92,7 @@ void DispSyncSource::setPhaseOffset(nsecs_t phaseOffset) {
    }
}

void DispSyncSource::onDispSyncEvent(nsecs_t when) {
void DispSyncSource::onDispSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp) {
    VSyncSource::Callback* callback;
    {
        std::lock_guard lock(mCallbackMutex);
@@ -104,7 +104,7 @@ void DispSyncSource::onDispSyncEvent(nsecs_t when) {
    }

    if (callback != nullptr) {
        callback->onVSyncEvent(when);
        callback->onVSyncEvent(when, expectedVSyncTimestamp);
    }
}

Loading