Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8d4f0062 authored by Phil Burk's avatar Phil Burk
Browse files

aaudio: write if there is room

Go ahead and write the data to the buffer if there is room,
even if it exceeds the setBufferSizeInFrames().

There is no point in holding it in the application buffer.
By making the data available to the DSP we can reduce glitches
or latency.

Because this offers better glitch protection, we can allow
setBufferSizeInFrames() to accept a buffer size of zero.
This will provide the lowest possible latency.

Test: OboeTester glitch test
Change-Id: Ie706867c5dd57c29b4393c806cd51ae09198873e
parent 1865ec9e
Loading
Loading
Loading
Loading
+20 −19
Original line number Original line Diff line number Diff line
@@ -653,7 +653,7 @@ aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames
        // Should we block?
        // Should we block?
        if (timeoutNanoseconds == 0) {
        if (timeoutNanoseconds == 0) {
            break; // don't block
            break; // don't block
        } else if (framesLeft > 0) {
        } else if (wakeTimeNanos != 0) {
            if (!mAudioEndpoint.isFreeRunning()) {
            if (!mAudioEndpoint.isFreeRunning()) {
                // If there is software on the other end of the FIFO then it may get delayed.
                // If there is software on the other end of the FIFO then it may get delayed.
                // So wake up just a little after we expect it to be ready.
                // So wake up just a little after we expect it to be ready.
@@ -712,37 +712,38 @@ void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {


aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
    int32_t adjustedFrames = requestedFrames;
    int32_t adjustedFrames = requestedFrames;
    int32_t actualFrames = 0;
    const int32_t maximumSize = getBufferCapacity() - mFramesPerBurst;
    int32_t maximumSize = getBufferCapacity();
    // The buffer size can be set to zero.
    // This means that the callback may be called when the internal buffer becomes empty.
    // This will be fine on some devices in ideal circumstances and will result in the
    // lowest possible latency.
    // If there are glitches then they should be detected as XRuns and the size can be increased.
    static const int32_t minimumSize = 0;


    // Clip to minimum size so that rounding up will work better.
    // Clip to minimum size so that rounding up will work better.
    if (adjustedFrames < 1) {
    adjustedFrames = std::max(minimumSize, adjustedFrames);
        adjustedFrames = 1;
    }


    if (adjustedFrames > maximumSize) {
    // Prevent arithmetic overflow by clipping before we round.
        // Clip to maximum size.
    if (adjustedFrames >= maximumSize) {
        adjustedFrames = maximumSize;
        adjustedFrames = maximumSize;
    } else {
    } else {
        // Round to the next highest burst size.
        // Round to the next highest burst size.
        int32_t numBursts = (adjustedFrames + mFramesPerBurst - 1) / mFramesPerBurst;
        int32_t numBursts = (adjustedFrames + mFramesPerBurst - 1) / mFramesPerBurst;
        adjustedFrames = numBursts * mFramesPerBurst;
        adjustedFrames = numBursts * mFramesPerBurst;
        // Rounding may have gone above maximum.
        if (adjustedFrames > maximumSize) {
            adjustedFrames = maximumSize;
        }
    }
    }


    aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(adjustedFrames, &actualFrames);
    // Clip against the actual size from the endpoint.
    if (result < 0) {
    int32_t actualFrames = 0;
        return result;
    mAudioEndpoint.setBufferSizeInFrames(maximumSize, &actualFrames);
    } else {
    // actualFrames should be <= maximumSize
        return (aaudio_result_t) actualFrames;
    adjustedFrames = std::min(actualFrames, adjustedFrames);
    }

    mBufferSizeInFrames = adjustedFrames;
    return (aaudio_result_t) adjustedFrames;
}
}


int32_t AudioStreamInternal::getBufferSize() const {
int32_t AudioStreamInternal::getBufferSize() const {
    return mAudioEndpoint.getBufferSizeInFrames();
    return mBufferSizeInFrames;
}
}


int32_t AudioStreamInternal::getBufferCapacity() const {
int32_t AudioStreamInternal::getBufferCapacity() const {
+3 −0
Original line number Original line Diff line number Diff line
@@ -204,6 +204,9 @@ private:
    // Sometimes the hardware is operating with a different channel count from the app.
    // Sometimes the hardware is operating with a different channel count from the app.
    // Then we require conversion in AAudio.
    // Then we require conversion in AAudio.
    int32_t                  mDeviceChannelCount = 0;
    int32_t                  mDeviceChannelCount = 0;

    int32_t                  mBufferSizeInFrames = 0; // local threshold to control latency

};
};


} /* namespace aaudio */
} /* namespace aaudio */
+7 −9
Original line number Original line Diff line number Diff line
@@ -167,8 +167,10 @@ aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t nu
        ATRACE_INT("aaWrote", framesWritten);
        ATRACE_INT("aaWrote", framesWritten);
    }
    }


    // Sleep if there is too much data in the buffer.
    // Calculate an ideal time to wake up.
    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
    if (wakeTimePtr != nullptr
            && (mAudioEndpoint.getFullFramesAvailable() >= getBufferSize())) {
        // By default wake up a few milliseconds from now.  // TODO review
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        aaudio_stream_state_t state = getState();
@@ -184,14 +186,10 @@ aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t nu
                break;
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            case AAUDIO_STREAM_STATE_STARTED:
            {
            {
                // When do we expect the next read burst to occur?
                // Sleep until the readCounter catches up and we only have

                // the getBufferSize() frames of data sitting in the buffer.
                // Calculate frame position based off of the writeCounter because
                int64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() - getBufferSize();
                // the readCounter might have just advanced in the background,
                wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
                        - mAudioEndpoint.getBufferSizeInFrames();
                wakeTime = mClockModel.convertPositionToTime(nextPosition);
            }
            }
                break;
                break;
            default:
            default: