
Commit 5edc4eae authored by Phil Burk

aaudio: free endpoint to prevent crashes

Free the AudioEndpoint and check for nullptr to
prevent accessing shared memory that had been freed.
This is to protect against calls to the stream after
AAudioStream_release() has been called.

Bug: 154274446
Bug: 154274027
Test: libaaudio/tests/test_various.cpp
Change-Id: I194d502fd48c4d31602ffce76aca6b28753ad7d2
parent 2a52ad31
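The commit message describes the hazard this patch closes: after AAudioStream_release() the client-side AudioEndpoint, and the shared memory it wraps, are freed, yet the AAudioStream object is still alive and can still receive calls. As a rough illustration only (not code from the patch or its tests), the NDK-level call sequence being protected looks like the sketch below; error handling is trimmed and an Android NDK build targeting API level 30 or later is assumed.

// Hedged sketch: the call pattern that used to risk touching freed shared
// memory and is now guarded by the null checks added in this commit.
#include <aaudio/AAudio.h>
#include <stdio.h>

int main() {
    AAudioStreamBuilder *builder = nullptr;
    AAudioStream *stream = nullptr;
    if (AAudio_createStreamBuilder(&builder) != AAUDIO_OK) return 1;
    if (AAudioStreamBuilder_openStream(builder, &stream) != AAUDIO_OK) return 1;
    AAudioStreamBuilder_delete(builder);

    AAudioStream_requestStart(stream);
    // ... audio runs for a while ...
    AAudioStream_requestStop(stream);
    AAudioStream_release(stream);   // endpoint and shared memory are freed here

    // Queries after release no longer dereference freed memory; the stream
    // returns its last cached frame counters instead.
    int64_t frames = AAudioStream_getFramesRead(stream);
    printf("frames read after release: %lld\n", (long long) frames);

    AAudioStream_close(stream);     // final teardown
    return 0;
}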
AudioEndpoint.cpp  +15 −32
@@ -32,19 +32,12 @@ using namespace aaudio;
#define RIDICULOUSLY_LARGE_FRAME_SIZE        4096

AudioEndpoint::AudioEndpoint()
    : mUpCommandQueue(nullptr)
    , mDataQueue(nullptr)
    , mFreeRunning(false)
    : mFreeRunning(false)
    , mDataReadCounter(0)
    , mDataWriteCounter(0)
{
}

AudioEndpoint::~AudioEndpoint() {
    delete mDataQueue;
    delete mUpCommandQueue;
}

// TODO Consider moving to a method in RingBufferDescriptor
static aaudio_result_t AudioEndpoint_validateQueueDescriptor(const char *type,
                                                  const RingBufferDescriptor *descriptor) {
@@ -144,7 +137,7 @@ aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDesc
        return AAUDIO_ERROR_INTERNAL;
    }

    mUpCommandQueue = new FifoBuffer(
    mUpCommandQueue = std::make_unique<FifoBuffer>(
            descriptor->bytesPerFrame,
            descriptor->capacityInFrames,
            descriptor->readCounterAddress,
@@ -173,7 +166,7 @@ aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDesc
                                  ? &mDataWriteCounter
                                  : descriptor->writeCounterAddress;

    mDataQueue = new FifoBuffer(
    mDataQueue = std::make_unique<FifoBuffer>(
            descriptor->bytesPerFrame,
            descriptor->capacityInFrames,
            readCounterAddress,
@@ -194,18 +187,15 @@ int32_t AudioEndpoint::getEmptyFramesAvailable(WrappingBuffer *wrappingBuffer) {
    return mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
}

int32_t AudioEndpoint::getEmptyFramesAvailable()
{
int32_t AudioEndpoint::getEmptyFramesAvailable() {
    return mDataQueue->getEmptyFramesAvailable();
}

int32_t AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer)
{
int32_t AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer) {
    return mDataQueue->getFullDataAvailable(wrappingBuffer);
}

int32_t AudioEndpoint::getFullFramesAvailable()
{
int32_t AudioEndpoint::getFullFramesAvailable() {
    return mDataQueue->getFullFramesAvailable();
}

@@ -217,29 +207,24 @@ void AudioEndpoint::advanceReadIndex(int32_t deltaFrames) {
    mDataQueue->advanceReadIndex(deltaFrames);
}

void AudioEndpoint::setDataReadCounter(fifo_counter_t framesRead)
{
void AudioEndpoint::setDataReadCounter(fifo_counter_t framesRead) {
    mDataQueue->setReadCounter(framesRead);
}

fifo_counter_t AudioEndpoint::getDataReadCounter()
{
fifo_counter_t AudioEndpoint::getDataReadCounter() const {
    return mDataQueue->getReadCounter();
}

void AudioEndpoint::setDataWriteCounter(fifo_counter_t framesRead)
{
void AudioEndpoint::setDataWriteCounter(fifo_counter_t framesRead) {
    mDataQueue->setWriteCounter(framesRead);
}

fifo_counter_t AudioEndpoint::getDataWriteCounter()
{
fifo_counter_t AudioEndpoint::getDataWriteCounter() const {
    return mDataQueue->getWriteCounter();
}

int32_t AudioEndpoint::setBufferSizeInFrames(int32_t requestedFrames,
                                            int32_t *actualFrames)
{
                                            int32_t *actualFrames) {
    if (requestedFrames < ENDPOINT_DATA_QUEUE_SIZE_MIN) {
        requestedFrames = ENDPOINT_DATA_QUEUE_SIZE_MIN;
    }
@@ -248,19 +233,17 @@ int32_t AudioEndpoint::setBufferSizeInFrames(int32_t requestedFrames,
    return AAUDIO_OK;
}

int32_t AudioEndpoint::getBufferSizeInFrames() const
{
int32_t AudioEndpoint::getBufferSizeInFrames() const {
    return mDataQueue->getThreshold();
}

int32_t AudioEndpoint::getBufferCapacityInFrames() const
{
int32_t AudioEndpoint::getBufferCapacityInFrames() const {
    return (int32_t)mDataQueue->getBufferCapacityInFrames();
}

void AudioEndpoint::dump() const {
    ALOGD("data readCounter  = %lld", (long long) mDataQueue->getReadCounter());
    ALOGD("data writeCounter = %lld", (long long) mDataQueue->getWriteCounter());
    ALOGD("data readCounter  = %lld", (long long) getDataReadCounter());
    ALOGD("data writeCounter = %lld", (long long) getDataWriteCounter());
}

void AudioEndpoint::eraseDataMemory() {
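In AudioEndpoint.cpp the raw FifoBuffer pointers become std::unique_ptr members, which is why the nullptr initializers and the hand-written destructor disappear. A minimal stand-alone sketch of that conversion follows, using an illustrative Buffer type rather than the real android::FifoBuffer.

// Minimal sketch of the raw-pointer -> unique_ptr conversion shown above.
// Buffer is an illustrative stand-in for android::FifoBuffer.
#include <cstdint>
#include <memory>

struct Buffer {
    Buffer(int32_t bytesPerFrame, int32_t capacityInFrames) {}
};

class Endpoint {
public:
    // Before: Endpoint() : mQueue(nullptr) {}  and  ~Endpoint() { delete mQueue; }
    // After: no user-declared destructor; unique_ptr releases the buffer.
    void configure(int32_t bytesPerFrame, int32_t capacityInFrames) {
        mQueue = std::make_unique<Buffer>(bytesPerFrame, capacityInFrames);
    }

private:
    std::unique_ptr<Buffer> mQueue;   // was: android::FifoBuffer *mQueue;
};

int main() {
    Endpoint endpoint;
    endpoint.configure(4, 256);
    return 0;
}   // mQueue is freed automatically when endpoint goes out of scope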
AudioEndpoint.h  +4 −5
@@ -35,7 +35,6 @@ class AudioEndpoint {

public:
    AudioEndpoint();
    virtual ~AudioEndpoint();

    /**
     * Configure based on the EndPointDescriptor_t.
@@ -67,11 +66,11 @@ public:
     */
    void setDataReadCounter(android::fifo_counter_t framesRead);

    android::fifo_counter_t getDataReadCounter();
    android::fifo_counter_t getDataReadCounter() const;

    void setDataWriteCounter(android::fifo_counter_t framesWritten);

    android::fifo_counter_t getDataWriteCounter();
    android::fifo_counter_t getDataWriteCounter() const;

    /**
     * The result is not valid until after configure() is called.
@@ -94,8 +93,8 @@ public:
    void dump() const;

private:
    android::FifoBuffer    *mUpCommandQueue;
    android::FifoBuffer    *mDataQueue;
    std::unique_ptr<android::FifoBuffer> mUpCommandQueue;
    std::unique_ptr<android::FifoBuffer> mDataQueue;
    bool                    mFreeRunning;
    android::fifo_counter_t mDataReadCounter; // only used if free-running
    android::fifo_counter_t mDataWriteCounter; // only used if free-running
AudioStreamInternal.cpp  +38 −23
@@ -58,7 +58,6 @@ using namespace aaudio;
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface  &serviceInterface, bool inService)
        : AudioStream()
        , mClockModel()
        , mAudioEndpoint()
        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
        , mInService(inService)
        , mServiceInterface(serviceInterface)
@@ -74,7 +73,6 @@ AudioStreamInternal::~AudioStreamInternal() {
aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {

    aaudio_result_t result = AAUDIO_OK;
    int32_t capacity;
    int32_t framesPerBurst;
    int32_t framesPerHardwareBurst;
    AAudioStreamRequest request;
@@ -173,7 +171,8 @@ aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
    }

    // Configure endpoint based on descriptor.
    result = mAudioEndpoint.configure(&mEndpointDescriptor, getDirection());
    mAudioEndpoint = std::make_unique<AudioEndpoint>();
    result = mAudioEndpoint->configure(&mEndpointDescriptor, getDirection());
    if (result != AAUDIO_OK) {
        goto error;
    }
@@ -201,9 +200,10 @@ aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
    }
    mFramesPerBurst = framesPerBurst; // only save good value

    capacity = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
    if (capacity < mFramesPerBurst || capacity > MAX_BUFFER_CAPACITY_IN_FRAMES) {
        ALOGE("%s - bufferCapacity out of range = %d", __func__, capacity);
    mBufferCapacityInFrames = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
    if (mBufferCapacityInFrames < mFramesPerBurst
            || mBufferCapacityInFrames > MAX_BUFFER_CAPACITY_IN_FRAMES) {
        ALOGE("%s - bufferCapacity out of range = %d", __func__, mBufferCapacityInFrames);
        result = AAUDIO_ERROR_OUT_OF_RANGE;
        goto error;
    }
@@ -239,7 +239,7 @@ aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
    // You can use this offset to reduce glitching.
    // You can also use this offset to force glitching. By iterating over multiple
    // values you can reveal the distribution of the hardware timing jitter.
    if (mAudioEndpoint.isFreeRunning()) { // MMAP?
    if (mAudioEndpoint->isFreeRunning()) { // MMAP?
        int32_t offsetMicros = (getDirection() == AAUDIO_DIRECTION_OUTPUT)
                ? AAudioProperty_getOutputMMapOffsetMicros()
                : AAudioProperty_getInputMMapOffsetMicros();
@@ -251,7 +251,7 @@ aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
        mTimeOffsetNanos = offsetMicros * AAUDIO_NANOS_PER_MICROSECOND;
    }

    setBufferSize(capacity / 2); // Default buffer size to match Q
    setBufferSize(mBufferCapacityInFrames / 2); // Default buffer size to match Q

    setState(AAUDIO_STREAM_STATE_OPEN);

@@ -280,6 +280,11 @@ aaudio_result_t AudioStreamInternal::release_l() {

        mServiceInterface.closeStream(serviceStreamHandle);
        mCallbackBuffer.reset();

        // Update local frame counters so we can query them after releasing the endpoint.
        getFramesRead();
        getFramesWritten();
        mAudioEndpoint.reset();
        result = mEndPointParcelable.close();
        aaudio_result_t result2 = AudioStream::release_l();
        return (result != AAUDIO_OK) ? result : result2;
@@ -538,7 +543,7 @@ aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *mes
        case AAUDIO_SERVICE_EVENT_DISCONNECTED:
            // Prevent hardware from looping on old data and making buzzing sounds.
            if (getDirection() == AAUDIO_DIRECTION_OUTPUT) {
                mAudioEndpoint.eraseDataMemory();
                mAudioEndpoint->eraseDataMemory();
            }
            result = AAUDIO_ERROR_DISCONNECTED;
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
@@ -564,7 +569,10 @@ aaudio_result_t AudioStreamInternal::drainTimestampsFromService() {

    while (result == AAUDIO_OK) {
        AAudioServiceMessage message;
        if (mAudioEndpoint.readUpCommand(&message) != 1) {
        if (!mAudioEndpoint) {
            break;
        }
        if (mAudioEndpoint->readUpCommand(&message) != 1) {
            break; // no command this time, no problem
        }
        switch (message.what) {
@@ -592,7 +600,10 @@ aaudio_result_t AudioStreamInternal::processCommands() {

    while (result == AAUDIO_OK) {
        AAudioServiceMessage message;
        if (mAudioEndpoint.readUpCommand(&message) != 1) {
        if (!mAudioEndpoint) {
            break;
        }
        if (mAudioEndpoint->readUpCommand(&message) != 1) {
            break; // no command this time, no problem
        }
        switch (message.what) {
@@ -625,7 +636,7 @@ aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames
    const char * fifoName = "aaRdy";
    ATRACE_BEGIN(traceName);
    if (ATRACE_ENABLED()) {
        int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
        int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
        ATRACE_INT(fifoName, fullFrames);
    }

@@ -654,7 +665,7 @@ aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames
        if (timeoutNanoseconds == 0) {
            break; // don't block
        } else if (wakeTimeNanos != 0) {
            if (!mAudioEndpoint.isFreeRunning()) {
            if (!mAudioEndpoint->isFreeRunning()) {
                // If there is software on the other end of the FIFO then it may get delayed.
                // So wake up just a little after we expect it to be ready.
                wakeTimeNanos += mWakeupDelayNanos;
@@ -679,12 +690,12 @@ aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames
                ALOGW("processData(): past deadline by %d micros",
                      (int)((wakeTimeNanos - deadlineNanos) / AAUDIO_NANOS_PER_MICROSECOND));
                mClockModel.dump();
                mAudioEndpoint.dump();
                mAudioEndpoint->dump();
                break;
            }

            if (ATRACE_ENABLED()) {
                int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
                int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
                ATRACE_INT(fifoName, fullFrames);
                int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
                ATRACE_INT("aaSlpNs", (int32_t)sleepForNanos);
@@ -696,7 +707,7 @@ aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames
    }

    if (ATRACE_ENABLED()) {
        int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
        int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
        ATRACE_INT(fifoName, fullFrames);
    }

@@ -730,11 +741,15 @@ aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
        adjustedFrames = std::min(maximumSize, adjustedFrames);
    }

    if (mAudioEndpoint) {
        // Clip against the actual size from the endpoint.
        int32_t actualFrames = 0;
    mAudioEndpoint.setBufferSizeInFrames(maximumSize, &actualFrames);
        // Set to maximum size so we can write extra data when ready in order to reduce glitches.
        // The amount we keep in the buffer is controlled by mBufferSizeInFrames.
        mAudioEndpoint->setBufferSizeInFrames(maximumSize, &actualFrames);
        // actualFrames should be <= actual maximum size of endpoint
        adjustedFrames = std::min(actualFrames, adjustedFrames);
    }

    mBufferSizeInFrames = adjustedFrames;
    ALOGV("%s(%d) returns %d", __func__, requestedFrames, adjustedFrames);
@@ -746,7 +761,7 @@ int32_t AudioStreamInternal::getBufferSize() const {
}

int32_t AudioStreamInternal::getBufferCapacity() const {
    return mAudioEndpoint.getBufferCapacityInFrames();
    return mBufferCapacityInFrames;
}

int32_t AudioStreamInternal::getFramesPerBurst() const {
@@ -759,5 +774,5 @@ aaudio_result_t AudioStreamInternal::joinThread(void** returnArg) {
}

bool AudioStreamInternal::isClockModelInControl() const {
    return isActive() && mAudioEndpoint.isFreeRunning() && mClockModel.isRunning();
    return isActive() && mAudioEndpoint->isFreeRunning() && mClockModel.isRunning();
}
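Two patterns recur in the AudioStreamInternal.cpp hunks above: every use of the endpoint now goes through a null check, and release_l() refreshes the cached frame counters before resetting the endpoint so later queries still return sensible values. The sketch below shows that ordering with illustrative types, not the real AAudio classes.

// Hedged sketch of the release ordering used above: refresh the cached
// counters while the endpoint still exists, then drop it, and have the
// getters fall back to the cache once the endpoint is gone.
#include <cassert>
#include <cstdint>
#include <memory>

struct Endpoint {                       // stand-in for aaudio::AudioEndpoint
    int64_t dataReadCounter() const { return 1000; }
};

class Stream {                          // stand-in for AudioStreamInternal
public:
    void open() { mEndpoint = std::make_unique<Endpoint>(); }

    int64_t getFramesRead() {
        if (mEndpoint) {                // endpoint may already have been released
            mLastFramesRead = mEndpoint->dataReadCounter();
        }
        return mLastFramesRead;         // cached value survives release()
    }

    void release() {
        getFramesRead();                // update the cache while memory is still valid
        mEndpoint.reset();              // now the shared memory can be freed safely
    }

private:
    std::unique_ptr<Endpoint> mEndpoint;
    int64_t mLastFramesRead = 0;
};

int main() {
    Stream stream;
    stream.open();
    stream.release();
    assert(stream.getFramesRead() == 1000);   // query after release: no crash, last value
    return 0;
}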
AudioStreamInternal.h  +7 −1
@@ -155,7 +155,8 @@ protected:

    IsochronousClockModel    mClockModel;      // timing model for chasing the HAL

    AudioEndpoint            mAudioEndpoint;   // source for reads or sink for writes
    std::unique_ptr<AudioEndpoint> mAudioEndpoint;   // source for reads or sink for writes

    aaudio_handle_t          mServiceStreamHandle; // opaque handle returned from service

    int32_t                  mFramesPerBurst = MIN_FRAMES_PER_BURST; // frames per HAL transfer
@@ -178,6 +179,9 @@ protected:

    float                    mStreamVolume = 1.0f;

    int64_t                  mLastFramesWritten = 0;
    int64_t                  mLastFramesRead = 0;

private:
    /*
     * Asynchronous write with data conversion.
@@ -207,6 +211,8 @@ private:
    int32_t                  mDeviceChannelCount = 0;

    int32_t                  mBufferSizeInFrames = 0; // local threshold to control latency
    int32_t                  mBufferCapacityInFrames = 0;


};

AudioStreamInternalCapture.cpp  +22 −19
@@ -42,8 +42,8 @@ AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &
AudioStreamInternalCapture::~AudioStreamInternalCapture() {}

void AudioStreamInternalCapture::advanceClientToMatchServerPosition() {
    int64_t readCounter = mAudioEndpoint.getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
    int64_t readCounter = mAudioEndpoint->getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint->getDataWriteCounter();

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t offset = readCounter - writeCounter;
@@ -53,7 +53,7 @@ void AudioStreamInternalCapture::advanceClientToMatchServerPosition() {

    // Force readCounter to match writeCounter.
    // This is because we cannot change the write counter in the hardware.
    mAudioEndpoint.setDataReadCounter(writeCounter);
    mAudioEndpoint->setDataReadCounter(writeCounter);
}

// Write the data, block if needed and timeoutMillis > 0
@@ -86,7 +86,7 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t
    }
    // If we have gotten this far then we have at least one timestamp from server.

    if (mAudioEndpoint.isFreeRunning()) {
    if (mAudioEndpoint->isFreeRunning()) {
        //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
        // Update data queue based on the timing model.
        // Jitter in the DSP can cause late writes to the FIFO.
@@ -95,7 +95,7 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t
        // that the DSP could have written the data.
        int64_t estimatedRemoteCounter = mClockModel.convertLatestTimeToPosition(currentNanoTime);
        // TODO refactor, maybe use setRemoteCounter()
        mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
        mAudioEndpoint->setDataWriteCounter(estimatedRemoteCounter);
    }

    // This code assumes that we have already received valid timestamps.
@@ -108,8 +108,8 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t

    // If the capture buffer is full beyond capacity then consider it an overrun.
    // For shared streams, the xRunCount is passed up from the service.
    if (mAudioEndpoint.isFreeRunning()
        && mAudioEndpoint.getFullFramesAvailable() > mAudioEndpoint.getBufferCapacityInFrames()) {
    if (mAudioEndpoint->isFreeRunning()
        && mAudioEndpoint->getFullFramesAvailable() > mAudioEndpoint->getBufferCapacityInFrames()) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaOverRuns", mXRunCount);
@@ -143,7 +143,7 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t
                // Calculate frame position based off of the readCounter because
                // the writeCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
                int64_t nextPosition = mAudioEndpoint->getDataReadCounter() + mFramesPerBurst;
                wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
            }
                break;
@@ -166,7 +166,7 @@ aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
    uint8_t *destination = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getFullFramesAvailable(&wrappingBuffer);
    mAudioEndpoint->getFullFramesAvailable(&wrappingBuffer);

    // Read data in one or two parts.
    for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
@@ -208,26 +208,29 @@ aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
    }

    int32_t framesProcessed = numFrames - framesLeft;
    mAudioEndpoint.advanceReadIndex(framesProcessed);
    mAudioEndpoint->advanceReadIndex(framesProcessed);

    //ALOGD("readNowWithConversion() returns %d", framesProcessed);
    return framesProcessed;
}

int64_t AudioStreamInternalCapture::getFramesWritten() {
    if (mAudioEndpoint) {
        const int64_t framesWrittenHardware = isClockModelInControl()
                ? mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
            : mAudioEndpoint.getDataWriteCounter();
                : mAudioEndpoint->getDataWriteCounter();
        // Add service offset and prevent retrograde motion.
        mLastFramesWritten = std::max(mLastFramesWritten,
                                      framesWrittenHardware + mFramesOffsetFromService);
    }
    return mLastFramesWritten;
}

int64_t AudioStreamInternalCapture::getFramesRead() {
    int64_t frames = mAudioEndpoint.getDataReadCounter() + mFramesOffsetFromService;
    //ALOGD("getFramesRead() returns %lld", (long long)frames);
    return frames;
    if (mAudioEndpoint) {
        mLastFramesRead = mAudioEndpoint->getDataReadCounter() + mFramesOffsetFromService;
    }
    return mLastFramesRead;
}

// Read data from the stream and pass it to the callback for processing.
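getFramesWritten() above combines two position sources, the clock-model estimate and the hardware write counter, and clamps the result with std::max so the reported position never moves backwards. A small illustrative sketch of that guard (values are made up):

// Hedged sketch of the "prevent retrograde motion" guard: whichever source
// the position estimate comes from, the value handed to the caller never
// decreases. Values are illustrative only.
#include <algorithm>
#include <cassert>
#include <cstdint>

class MonotonicPosition {
public:
    int64_t update(int64_t estimate) {
        mLast = std::max(mLast, estimate);   // clamp against the previous report
        return mLast;
    }
private:
    int64_t mLast = 0;
};

int main() {
    MonotonicPosition pos;
    assert(pos.update(480) == 480);   // clock-model estimate
    assert(pos.update(470) == 480);   // slightly stale hardware counter: no backward step
    assert(pos.update(960) == 960);
    return 0;
}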