Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f17f4905 authored by Phil Burk, committed by android-build-merger
Browse files

Merge "aaudio: reduce glitching by improving sleep timing" into oc-dr1-dev

am: d937a5cf

Change-Id: Ic8e3f97c88ec2197a6d66a48eb091d8eea166891
parents 2eddd660 d937a5cf
Loading
Loading
Loading
Loading
+16 −12
Original line number Diff line number Diff line
@@ -113,7 +113,8 @@ aaudio_result_t AudioEndpoint_validateDescriptor(const EndpointDescriptor *pEndp
    return result;
}

aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor)
aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor,
                                         aaudio_direction_t   direction)
{
    aaudio_result_t result = AudioEndpoint_validateDescriptor(pEndpointDescriptor);
    if (result != AAUDIO_OK) {
@@ -143,12 +144,20 @@ aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDesc
            descriptor->dataAddress
    );

    // ============================ down data queue =============================
    // ============================ data queue =============================
    descriptor = &pEndpointDescriptor->dataQueueDescriptor;
    ALOGV("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
    ALOGV("AudioEndpoint::configure() data readCounterAddress = %p", descriptor->readCounterAddress);
    mFreeRunning = descriptor->readCounterAddress == nullptr;
    ALOGV("AudioEndpoint::configure() data readCounterAddress = %p",
          descriptor->readCounterAddress);

    // An example of free running is when the other side is read or written by hardware DMA
    // or a DSP. It does not update its counter so we have to update it.
    int64_t *remoteCounter = (direction == AAUDIO_DIRECTION_OUTPUT)
                             ? descriptor->readCounterAddress // read by other side
                             : descriptor->writeCounterAddress; // written by other side
    mFreeRunning = (remoteCounter == nullptr);
    ALOGV("AudioEndpoint::configure() mFreeRunning = %d", mFreeRunning ? 1 : 0);

    int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
                                  ? &mDataReadCounter
                                  : descriptor->readCounterAddress;
@@ -173,13 +182,8 @@ aaudio_result_t AudioEndpoint::readUpCommand(AAudioServiceMessage *commandPtr)
    return mUpCommandQueue->read(commandPtr, 1);
}

// Non-blocking write of up to numFrames from buffer into the shared data FIFO.
// Returns the number of frames actually written, or a negative AAudio error code
// (per the header's contract: "framesWritten or a negative error code").
aaudio_result_t AudioEndpoint::writeDataNow(const void *buffer, int32_t numFrames)
{
    return mDataQueue->write(buffer, numFrames);
}

void AudioEndpoint::getEmptyFramesAvailable(WrappingBuffer *wrappingBuffer) {
    mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
int32_t AudioEndpoint::getEmptyFramesAvailable(WrappingBuffer *wrappingBuffer) {
    return mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
}

int32_t AudioEndpoint::getEmptyFramesAvailable()
@@ -187,7 +191,7 @@ int32_t AudioEndpoint::getEmptyFramesAvailable()
    return mDataQueue->getFifoControllerBase()->getEmptyFramesAvailable();
}

void AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer)
int32_t AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer)
{
    return mDataQueue->getFullDataAvailable(wrappingBuffer);
}
+4 −9
Original line number Diff line number Diff line
@@ -40,7 +40,8 @@ public:
    /**
     * Configure based on the EndPointDescriptor_t.
     */
    aaudio_result_t configure(const EndpointDescriptor *pEndpointDescriptor);
    aaudio_result_t configure(const EndpointDescriptor *pEndpointDescriptor,
                              aaudio_direction_t direction);

    /**
     * Read from a command passed up from the Server.
@@ -48,17 +49,11 @@ public:
     */
    aaudio_result_t readUpCommand(AAudioServiceMessage *commandPtr);

    /**
     * Non-blocking write.
     * @return framesWritten or a negative error code.
     */
    aaudio_result_t writeDataNow(const void *buffer, int32_t numFrames);

    void getEmptyFramesAvailable(android::WrappingBuffer *wrappingBuffer);
    int32_t getEmptyFramesAvailable(android::WrappingBuffer *wrappingBuffer);

    int32_t getEmptyFramesAvailable();

    void getFullFramesAvailable(android::WrappingBuffer *wrappingBuffer);
    int32_t getFullFramesAvailable(android::WrappingBuffer *wrappingBuffer);

    int32_t getFullFramesAvailable();

+38 −12
Original line number Diff line number Diff line
@@ -28,10 +28,10 @@
#include <binder/IServiceManager.h>

#include <aaudio/AAudio.h>
#include <cutils/properties.h>
#include <utils/String16.h>
#include <utils/Trace.h>

#include "AudioClock.h"
#include "AudioEndpointParcelable.h"
#include "binding/AAudioStreamRequest.h"
#include "binding/AAudioStreamConfiguration.h"
@@ -39,6 +39,7 @@
#include "binding/AAudioServiceMessage.h"
#include "core/AudioStreamBuilder.h"
#include "fifo/FifoBuffer.h"
#include "utility/AudioClock.h"
#include "utility/LinearRamp.h"

#include "AudioStreamInternal.h"
@@ -64,7 +65,12 @@ AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterfa
        , mFramesPerBurst(16)
        , mStreamVolume(1.0f)
        , mInService(inService)
        , mServiceInterface(serviceInterface) {
        , mServiceInterface(serviceInterface)
        , mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        , mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        {
    ALOGD("AudioStreamInternal(): mWakeupDelayNanos = %d, mMinimumSleepNanos = %d",
          mWakeupDelayNanos, mMinimumSleepNanos);
}

AudioStreamInternal::~AudioStreamInternal() {
@@ -135,7 +141,7 @@ aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
        }

        // Configure endpoint based on descriptor.
        mAudioEndpoint.configure(&mEndpointDescriptor);
        mAudioEndpoint.configure(&mEndpointDescriptor, getDirection());

        mFramesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
        int32_t capacity = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
@@ -472,12 +478,12 @@ aaudio_result_t AudioStreamInternal::processCommands() {
aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames,
                                                 int64_t timeoutNanoseconds)
{
    const char * traceName = (mInService) ? "aaWrtS" : "aaWrtC";
    const char * traceName = "aaProc";
    const char * fifoName = "aaRdy";
    ATRACE_BEGIN(traceName);
    int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
    if (ATRACE_ENABLED()) {
        const char * traceName = (mInService) ? "aaFullS" : "aaFullC";
        ATRACE_INT(traceName, fullFrames);
        int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
        ATRACE_INT(fifoName, fullFrames);
    }

    aaudio_result_t result = AAUDIO_OK;
@@ -505,10 +511,12 @@ aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames
        if (timeoutNanoseconds == 0) {
            break; // don't block
        } else if (framesLeft > 0) {
            // clip the wake time to something reasonable
            if (wakeTimeNanos < currentTimeNanos) {
                wakeTimeNanos = currentTimeNanos;
            if (!mAudioEndpoint.isFreeRunning()) {
                // If there is software on the other end of the FIFO then it may get delayed.
                // So wake up just a little after we expect it to be ready.
                wakeTimeNanos += mWakeupDelayNanos;
            }

            if (wakeTimeNanos > deadlineNanos) {
                // If we time out, just return the framesWritten so far.
                // TODO remove after we fix the deadline bug
@@ -525,12 +533,30 @@ aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames
                break;
            }

            currentTimeNanos = AudioClock::getNanoseconds();
            int64_t earliestWakeTime = currentTimeNanos + mMinimumSleepNanos;
            // Guarantee a minimum sleep time.
            if (wakeTimeNanos < earliestWakeTime) {
                wakeTimeNanos = earliestWakeTime;
            }

            if (ATRACE_ENABLED()) {
                int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
                ATRACE_INT(fifoName, fullFrames);
                int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
            AudioClock::sleepForNanos(sleepForNanos);
                ATRACE_INT("aaSlpNs", (int32_t)sleepForNanos);
            }

            AudioClock::sleepUntilNanoTime(wakeTimeNanos);
            currentTimeNanos = AudioClock::getNanoseconds();
        }
    }

    if (ATRACE_ENABLED()) {
        int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
        ATRACE_INT(fifoName, fullFrames);
    }

    // return error or framesProcessed
    (void) loopCount;
    ATRACE_END();
+6 −0
Original line number Diff line number Diff line
@@ -27,6 +27,7 @@
#include "client/IsochronousClockModel.h"
#include "client/AudioEndpoint.h"
#include "core/AudioStream.h"
#include "utility/AudioClock.h"
#include "utility/LinearRamp.h"

using android::sp;
@@ -173,6 +174,11 @@ private:
    // Adjust timing model based on timestamp from service.
    void processTimestamp(uint64_t position, int64_t time);

    // Thread on other side of FIFO will have wakeup jitter.
    // By delaying slightly we can avoid waking up before other side is ready.
    const int32_t            mWakeupDelayNanos; // delay past typical wakeup jitter
    const int32_t            mMinimumSleepNanos; // minimum sleep while polling

    AudioEndpointParcelable  mEndPointParcelable; // description of the buffers filled by service
    EndpointDescriptor       mEndpointDescriptor; // buffer description with resolved addresses
};
+20 −11
Original line number Diff line number Diff line
@@ -24,6 +24,9 @@
#include "client/AudioStreamInternalCapture.h"
#include "utility/AudioClock.h"

#define ATRACE_TAG ATRACE_TAG_AUDIO
#include <utils/Trace.h>

using android::WrappingBuffer;

using namespace aaudio;
@@ -36,7 +39,6 @@ AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &

AudioStreamInternalCapture::~AudioStreamInternalCapture() {}


// Write the data, block if needed and timeoutMillis > 0
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
                                               int64_t timeoutNanoseconds)
@@ -52,6 +54,9 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t
        return result;
    }

    const char *traceName = "aaRdNow";
    ATRACE_BEGIN(traceName);

    if (mAudioEndpoint.isFreeRunning()) {
        //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
        // Update data queue based on the timing model.
@@ -63,6 +68,9 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t
    // If the write index passed the read index then consider it an overrun.
    if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaOverRuns", mXRunCount);
        }
    }

    // Read some data from the buffer.
@@ -70,6 +78,9 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t
    int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
    //    numFrames, framesProcessed);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaRead", framesProcessed);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesProcessed >= 0) {
@@ -82,14 +93,14 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                break;
            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
            case AAUDIO_STREAM_STATE_STARTED:
            {
                uint32_t burstSize = mFramesPerBurst;
                if (burstSize < 32) {
                    burstSize = 32; // TODO review
                }
                // When do we expect the next write burst to occur?

                uint64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + burstSize;
                // Calculate frame position based off of the readCounter because
                // the writeCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
                wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
            }
                break;
@@ -99,10 +110,8 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t
        *wakeTimePtr = wakeTime;

    }
//    ALOGD("AudioStreamInternalCapture::readNow finished: now = %llu, read# = %llu, wrote# = %llu",
//         (unsigned long long)currentNanoTime,
//         (unsigned long long)mAudioEndpoint.getDataReadCounter(),
//         (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());

    ATRACE_END();
    return framesProcessed;
}

Loading