Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bcc36746 authored by Phil Burk's avatar Phil Burk
Browse files

aaudio: fix timestamps and underruns

Start the client after we get valid timing data from the server.
That can take a while because of the long cold start times.
The client is synced with the current position of the service.
Now the client can start clean with no underruns.

Bug: 63918065
Test: test_timestamps.cpp
Change-Id: I5d01eb844e4b14cd5477d56ea1dd9e309abc1c52
parent 7328a80f
Loading
Loading
Loading
Loading
+70 −19
Original line number Diff line number Diff line
@@ -66,9 +66,9 @@ AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface
        , mStreamVolume(1.0f)
        , mInService(inService)
        , mServiceInterface(serviceInterface)
        , mAtomicTimestamp()
        , mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        , mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        , mAtomicTimestamp()
        {
    ALOGD("AudioStreamInternal(): mWakeupDelayNanos = %d, mMinimumSleepNanos = %d",
          mWakeupDelayNanos, mMinimumSleepNanos);
@@ -250,25 +250,45 @@ static void *aaudio_callback_thread_proc(void *context)
    }
}

/*
 * It normally takes about 20-30 msec to start a stream on the server.
 * But the first time can take as much as 200-300 msec. The HW
 * starts right away so by the time the client gets a chance to write into
 * the buffer, it is already in a deep underflow state. That can cause the
 * XRunCount to be non-zero, which could lead an app to tune its latency higher.
 * To avoid this problem, we set a request for the processing code to start the
 * client stream at the same position as the server stream.
 * The processing code will then save the current offset
 * between client and server and apply that to any position given to the app.
 */
aaudio_result_t AudioStreamInternal::requestStart()
{
    int64_t startTime;
    ALOGD("AudioStreamInternal()::requestStart()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal::requestStart() mServiceStreamHandle invalid");
        ALOGE("requestStart() mServiceStreamHandle invalid");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    if (isActive()) {
        ALOGE("AudioStreamInternal::requestStart() already active");
        ALOGE("requestStart() already active");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    aaudio_stream_state_t originalState = getState();

    aaudio_stream_state_t originalState = getState();
    if (originalState == AAUDIO_STREAM_STATE_DISCONNECTED) {
        ALOGE("requestStart() but DISCONNECTED");
        return AAUDIO_ERROR_DISCONNECTED;
    }
    setState(AAUDIO_STREAM_STATE_STARTING);
    aaudio_result_t result = AAudioConvert_androidToAAudioResult(startWithStatus());

    // Clear any stale timestamps from the previous run.
    drainTimestampsFromService();

    status_t status = startWithStatus(); // Call PlayerBase, which will start the device stream.
    aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);

    startTime = AudioClock::getNanoseconds();
    mClockModel.start(startTime);
    mNeedCatchUp.request();  // Ask data processing code to catch up when first timestamp received.

    if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
        // Launch the callback loop thread.
@@ -314,13 +334,14 @@ aaudio_result_t AudioStreamInternal::stopCallback()
/**
 * Stop the stream on the service side and halt local position tracking.
 *
 * Also clears the cached timestamp so a later getTimestamp() call cannot
 * return stale data from this run.
 *
 * @return AAUDIO_OK on success, or AAUDIO_ERROR_INVALID_STATE if the
 *         service stream handle is not valid.
 */
aaudio_result_t AudioStreamInternal::requestStopInternal()
{
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        // Diff interleaving had duplicated this log line; keep the short form only.
        ALOGE("requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
              mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    // Stop the clock model so it no longer extrapolates positions.
    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_STOPPING);
    mAtomicTimestamp.clear(); // invalidate the last timestamp from the server
    return AAudioConvert_androidToAAudioResult(stopWithStatus());
}

@@ -336,7 +357,7 @@ aaudio_result_t AudioStreamInternal::requestStop()

aaudio_result_t AudioStreamInternal::registerThread() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal::registerThread() mServiceStreamHandle invalid");
        ALOGE("registerThread() mServiceStreamHandle invalid");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.registerAudioThread(mServiceStreamHandle,
@@ -346,7 +367,7 @@ aaudio_result_t AudioStreamInternal::registerThread() {

aaudio_result_t AudioStreamInternal::unregisterThread() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal::unregisterThread() mServiceStreamHandle invalid");
        ALOGE("unregisterThread() mServiceStreamHandle invalid");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, gettid());
@@ -374,13 +395,15 @@ aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
    // Generated in server and passed to client. Return latest.
    if (mAtomicTimestamp.isValid()) {
        Timestamp timestamp = mAtomicTimestamp.read();
        *framePosition = timestamp.getPosition();
        int64_t position = timestamp.getPosition() + mFramesOffsetFromService;
        if (position >= 0) {
            *framePosition = position;
            *timeNanoseconds = timestamp.getNanoseconds();
            return AAUDIO_OK;
    } else {
        return AAUDIO_ERROR_UNAVAILABLE;
        }
    }
    return AAUDIO_ERROR_UNAVAILABLE;
}

aaudio_result_t AudioStreamInternal::updateStateMachine() {
    if (isDataCallbackActive()) {
@@ -394,14 +417,14 @@ void AudioStreamInternal::logTimestamp(AAudioServiceMessage &command) {
    static int64_t oldTime = 0;
    int64_t framePosition = command.timestamp.position;
    int64_t nanoTime = command.timestamp.timestamp;
    ALOGD("AudioStreamInternal: timestamp says framePosition = %08lld at nanoTime %lld",
    ALOGD("logTimestamp: timestamp says framePosition = %8lld at nanoTime %lld",
         (long long) framePosition,
         (long long) nanoTime);
    int64_t nanosDelta = nanoTime - oldTime;
    if (nanosDelta > 0 && oldTime > 0) {
        int64_t framesDelta = framePosition - oldPosition;
        int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
        ALOGD("AudioStreamInternal: framesDelta = %08lld, nanosDelta = %08lld, rate = %lld",
        ALOGD("logTimestamp:     framesDelta = %8lld, nanosDelta = %8lld, rate = %lld",
              (long long) framesDelta, (long long) nanosDelta, (long long) rate);
    }
    oldPosition = framePosition;
@@ -478,6 +501,34 @@ aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *mes
    return result;
}

/**
 * Drain and discard any messages already queued by the service.
 *
 * Timestamp messages are simply thrown away (they are stale by definition
 * here); event messages are still dispatched to onEventFromServer() so state
 * changes are not lost.
 *
 * @return AAUDIO_OK normally, an error code if an event handler fails or an
 *         unknown message type is seen.
 */
aaudio_result_t AudioStreamInternal::drainTimestampsFromService() {
    aaudio_result_t result = AAUDIO_OK;
    AAudioServiceMessage message;
    // Keep pulling commands while the previous iteration succeeded and the
    // queue still has one to give us (readUpCommand() returns 1 per message).
    while (result == AAUDIO_OK && mAudioEndpoint.readUpCommand(&message) == 1) {
        switch (message.what) {
            case AAudioServiceMessage::code::TIMESTAMP_SERVICE:
            case AAudioServiceMessage::code::TIMESTAMP_HARDWARE:
                // Stale timestamps are exactly what we are here to discard.
                break;

            case AAudioServiceMessage::code::EVENT:
                // Events must not be dropped; forward them as usual.
                result = onEventFromServer(&message);
                break;

            default:
                ALOGE("WARNING - drainTimestampsFromService() Unrecognized what = %d",
                      (int) message.what);
                result = AAUDIO_ERROR_INTERNAL;
                break;
        }
    }
    return result;
}

// Process all the commands coming from the server.
aaudio_result_t AudioStreamInternal::processCommands() {
    aaudio_result_t result = AAUDIO_OK;
@@ -502,7 +553,7 @@ aaudio_result_t AudioStreamInternal::processCommands() {
            break;

        default:
            ALOGE("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
            ALOGE("WARNING - processCommands() Unrecognized what = %d",
                 (int) message.what);
            result = AAUDIO_ERROR_INTERNAL;
            break;
@@ -613,7 +664,7 @@ aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
    }

    aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
    ALOGD("AudioStreamInternal::setBufferSize() req = %d => %d", requestedFrames, actualFrames);
    ALOGD("setBufferSize() req = %d => %d", requestedFrames, actualFrames);
    if (result < 0) {
        return result;
    } else {
+7 −2
Original line number Diff line number Diff line
@@ -115,12 +115,15 @@ protected:
                            int64_t currentTimeNanos,
                            int64_t *wakeTimePtr) = 0;

    aaudio_result_t drainTimestampsFromService();

    aaudio_result_t processCommands();

    aaudio_result_t requestStopInternal();

    aaudio_result_t stopCallback();

    virtual void advanceClientToMatchServerPosition() = 0;

    virtual void onFlushFromServer() {}

@@ -167,6 +170,10 @@ protected:

    AAudioServiceInterface  &mServiceInterface;   // abstract interface to the service

    SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;

    AtomicRequestor          mNeedCatchUp;   // Ask read() or write() to sync on first timestamp.

private:
    /*
     * Asynchronous write with data conversion.
@@ -188,8 +195,6 @@ private:
    AudioEndpointParcelable  mEndPointParcelable; // description of the buffers filled by service
    EndpointDescriptor       mEndpointDescriptor; // buffer description with resolved addresses

    SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;

    int64_t                  mServiceLatencyNanos = 0;

};
+38 −4
Original line number Diff line number Diff line
@@ -39,6 +39,21 @@ AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &

AudioStreamInternalCapture::~AudioStreamInternalCapture() {}

/**
 * Jump the client's read position forward to the server's write position.
 *
 * The skipped span is folded into mFramesOffsetFromService so that
 * getFramesRead() does not appear to move backwards after the resync.
 */
void AudioStreamInternalCapture::advanceClientToMatchServerPosition() {
    const int64_t clientPosition = mAudioEndpoint.getDataReadCounter();
    const int64_t serverPosition = mAudioEndpoint.getDataWriteCounter();

    // Absorb the jump into the offset so callers never see retrograde motion.
    mFramesOffsetFromService += clientPosition - serverPosition;
    ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
          (long long)clientPosition, (long long)serverPosition, (long long)mFramesOffsetFromService);

    // The hardware owns the write counter, so move our read counter to it instead.
    mAudioEndpoint.setDataReadCounter(serverPosition);
}

// Write the data, block if needed and timeoutMillis > 0
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
                                               int64_t timeoutNanoseconds)
@@ -57,6 +72,18 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t
    const char *traceName = "aaRdNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from server.

    if (mAudioEndpoint.isFreeRunning()) {
        //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
        // Update data queue based on the timing model.
@@ -65,6 +92,14 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t
        mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
    }

    // This code assumes that we have already received valid timestamps.
    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition();
        mNeedCatchUp.acknowledge();
    }

    // If the write index passed the read index then consider it an overrun.
    if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
        mXRunCount++;
@@ -100,8 +135,8 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t
                // Calculate frame position based off of the readCounter because
                // the writeCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
                wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
                int64_t nextPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
                wakeTime = mClockModel.convertPositionToTime(nextPosition);
            }
                break;
            default:
@@ -186,8 +221,7 @@ int64_t AudioStreamInternalCapture::getFramesWritten() {
}

/**
 * @return the number of frames the client has consumed, expressed in the
 *         application's frame of reference (hardware read counter plus the
 *         client/server offset maintained by advanceClientToMatchServerPosition()).
 */
int64_t AudioStreamInternalCapture::getFramesRead() {
    // Diff interleaving left both the old (write-counter-based) and new
    // declarations of `frames`; keep only the post-refactor read-counter form.
    int64_t frames = mAudioEndpoint.getDataReadCounter() + mFramesOffsetFromService;
    //ALOGD("AudioStreamInternalCapture::getFramesRead() returns %lld", (long long)frames);
    return frames;
}
+2 −0
Original line number Diff line number Diff line
@@ -46,6 +46,8 @@ public:
    }
protected:

    void advanceClientToMatchServerPosition() override;

/**
 * Low level data processing that will not block. It will just read or write as much as it can.
 *
+32 −8
Original line number Diff line number Diff line
@@ -48,6 +48,7 @@ aaudio_result_t AudioStreamInternalPlay::requestPauseInternal()

    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_PAUSING);
    mAtomicTimestamp.clear();
    return AAudioConvert_androidToAAudioResult(pauseWithStatus());
}

@@ -72,21 +73,25 @@ aaudio_result_t AudioStreamInternalPlay::requestFlush() {
    return mServiceInterface.flushStream(mServiceStreamHandle);
}

void AudioStreamInternalPlay::onFlushFromServer() {
void AudioStreamInternalPlay::advanceClientToMatchServerPosition() {
    int64_t readCounter = mAudioEndpoint.getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t framesFlushed = writeCounter - readCounter;
    mFramesOffsetFromService += framesFlushed;
    ALOGD("AudioStreamInternal::onFlushFromServer() readN = %lld, writeN = %lld, offset = %lld",
    int64_t offset = writeCounter - readCounter;
    mFramesOffsetFromService += offset;
    ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);

    // Flush written frames by forcing writeCounter to readCounter.
    // This is because we cannot move the read counter in the hardware.
    // Force writeCounter to match readCounter.
    // This is because we cannot change the read counter in the hardware.
    mAudioEndpoint.setDataWriteCounter(readCounter);
}

void AudioStreamInternalPlay::onFlushFromServer() {
    advanceClientToMatchServerPosition();
}

// Write the data, block if needed and timeoutMillis > 0
aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
                                           int64_t timeoutNanoseconds)
@@ -106,6 +111,18 @@ aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t nu
    const char *traceName = "aaWrNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from server.

    // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
    if (mAudioEndpoint.isFreeRunning()) {
        // Update data queue based on the timing model.
@@ -114,6 +131,13 @@ aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t nu
        mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
    }

    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition();
        mNeedCatchUp.acknowledge();
    }

    // If the read index passed the write index then consider it an underrun.
    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
@@ -153,9 +177,9 @@ aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t nu
                // Calculate frame position based off of the writeCounter because
                // the readCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
                int64_t nextPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
                        - mAudioEndpoint.getBufferSizeInFrames();
                wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
                wakeTime = mClockModel.convertPositionToTime(nextPosition);
            }
                break;
            default:
Loading