Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 703ddd9d authored by Phil Burk, committed by android-build-merger
Browse files

Merge "aaudio: prevent memory leak from double configure" into oc-mr1-dev am: 8a37a466

am: e6cef0ec

Change-Id: I60fbfb58f014f1addf3893b49d62d3a090276ace
parents 42aaf443 e6cef0ec
Loading
Loading
Loading
Loading
+11 −7
Original line number Diff line number Diff line
@@ -121,24 +121,28 @@ aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDesc
{
    aaudio_result_t result = AudioEndpoint_validateDescriptor(pEndpointDescriptor);
    if (result != AAUDIO_OK) {
        ALOGE("AudioEndpoint_validateQueueDescriptor returned %d %s",
              result, AAudio_convertResultToText(result));
        return result;
    }

    // ============================ up message queue =============================
    const RingBufferDescriptor *descriptor = &pEndpointDescriptor->upMessageQueueDescriptor;
    if(descriptor->bytesPerFrame != sizeof(AAudioServiceMessage)) {
        ALOGE("AudioEndpoint::configure() bytesPerFrame != sizeof(AAudioServiceMessage) = %d",
        ALOGE("AudioEndpoint.configure() bytesPerFrame != sizeof(AAudioServiceMessage) = %d",
              descriptor->bytesPerFrame);
        return AAUDIO_ERROR_INTERNAL;
    }

    if(descriptor->readCounterAddress == nullptr || descriptor->writeCounterAddress == nullptr) {
        ALOGE("AudioEndpoint_validateQueueDescriptor() NULL counter address");
        ALOGE("AudioEndpoint.configure() NULL counter address");
        return AAUDIO_ERROR_NULL;
    }

    // Prevent memory leak and reuse.
    if(mUpCommandQueue != nullptr || mDataQueue != nullptr) {
        ALOGE("AudioEndpoint.configure() endpoint already used");
        return AAUDIO_ERROR_INTERNAL;
    }

    mUpCommandQueue = new FifoBuffer(
            descriptor->bytesPerFrame,
            descriptor->capacityInFrames,
@@ -149,8 +153,8 @@ aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDesc

    // ============================ data queue =============================
    descriptor = &pEndpointDescriptor->dataQueueDescriptor;
    ALOGV("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
    ALOGV("AudioEndpoint::configure() data readCounterAddress = %p",
    ALOGV("AudioEndpoint.configure() data framesPerBurst = %d", descriptor->framesPerBurst);
    ALOGV("AudioEndpoint.configure() data readCounterAddress = %p",
          descriptor->readCounterAddress);

    // An example of free running is when the other side is read or written by hardware DMA
@@ -159,7 +163,7 @@ aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDesc
                             ? descriptor->readCounterAddress // read by other side
                             : descriptor->writeCounterAddress; // written by other side
    mFreeRunning = (remoteCounter == nullptr);
    ALOGV("AudioEndpoint::configure() mFreeRunning = %d", mFreeRunning ? 1 : 0);
    ALOGV("AudioEndpoint.configure() mFreeRunning = %d", mFreeRunning ? 1 : 0);

    int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
                                  ? &mDataReadCounter
+83 −69
Original line number Diff line number Diff line
@@ -80,9 +80,16 @@ AudioStreamInternal::~AudioStreamInternal() {
aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {

    aaudio_result_t result = AAUDIO_OK;
    int32_t capacity;
    AAudioStreamRequest request;
    AAudioStreamConfiguration configuration;
    AAudioStreamConfiguration configurationOutput;

    if (getState() != AAUDIO_STREAM_STATE_UNINITIALIZED) {
        ALOGE("AudioStreamInternal::open(): already open! state = %d", getState());
        return AAUDIO_ERROR_INVALID_STATE;
    }

    // Copy requested parameters to the stream.
    result = AudioStream::open(builder);
    if (result < 0) {
        return result;
@@ -109,52 +116,56 @@ aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {

    request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());

    mServiceStreamHandle = mServiceInterface.openStream(request, configuration);
    mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
    if (mServiceStreamHandle < 0) {
        result = mServiceStreamHandle;
        ALOGE("AudioStreamInternal.open(): openStream() returned %d", result);
    } else {
        result = configuration.validate();
        if (result != AAUDIO_OK) {
            close();
        ALOGE("AudioStreamInternal::open(): openStream() returned %d", result);
        return result;
    }

    result = configurationOutput.validate();
    if (result != AAUDIO_OK) {
        goto error;
    }
    // Save results of the open.
        setSampleRate(configuration.getSampleRate());
        setSamplesPerFrame(configuration.getSamplesPerFrame());
        setDeviceId(configuration.getDeviceId());
        setSharingMode(configuration.getSharingMode());
    setSampleRate(configurationOutput.getSampleRate());
    setSamplesPerFrame(configurationOutput.getSamplesPerFrame());
    setDeviceId(configurationOutput.getDeviceId());
    setSharingMode(configurationOutput.getSharingMode());

    // Save device format so we can do format conversion and volume scaling together.
        mDeviceFormat = configuration.getFormat();
    mDeviceFormat = configurationOutput.getFormat();

    result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
    if (result != AAUDIO_OK) {
            mServiceInterface.closeStream(mServiceStreamHandle);
            return result;
        goto error;
    }

        // resolve parcelable into a descriptor
    // Resolve parcelable into a descriptor.
    result = mEndPointParcelable.resolve(&mEndpointDescriptor);
    if (result != AAUDIO_OK) {
            mServiceInterface.closeStream(mServiceStreamHandle);
            return result;
        goto error;
    }

    // Configure endpoint based on descriptor.
        mAudioEndpoint.configure(&mEndpointDescriptor, getDirection());
    result = mAudioEndpoint.configure(&mEndpointDescriptor, getDirection());
    if (result != AAUDIO_OK) {
        goto error;
    }

    mFramesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
        int32_t capacity = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
    capacity = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;

    // Validate result from server.
    if (mFramesPerBurst < 16 || mFramesPerBurst > 16 * 1024) {
            ALOGE("AudioStream::open(): framesPerBurst out of range = %d", mFramesPerBurst);
            return AAUDIO_ERROR_OUT_OF_RANGE;
        ALOGE("AudioStreamInternal::open(): framesPerBurst out of range = %d", mFramesPerBurst);
        result = AAUDIO_ERROR_OUT_OF_RANGE;
        goto error;
    }
    if (capacity < mFramesPerBurst || capacity > 32 * 1024) {
            ALOGE("AudioStream::open(): bufferCapacity out of range = %d", capacity);
            return AAUDIO_ERROR_OUT_OF_RANGE;
        ALOGE("AudioStreamInternal::open(): bufferCapacity out of range = %d", capacity);
        result = AAUDIO_ERROR_OUT_OF_RANGE;
        goto error;
    }

    mClockModel.setSampleRate(getSampleRate());
@@ -165,13 +176,13 @@ aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
        if (mCallbackFrames > getBufferCapacity() / 2) {
            ALOGE("AudioStreamInternal::open(): framesPerCallback too big = %d, capacity = %d",
                  mCallbackFrames, getBufferCapacity());
                mServiceInterface.closeStream(mServiceStreamHandle);
                return AAUDIO_ERROR_OUT_OF_RANGE;
            result = AAUDIO_ERROR_OUT_OF_RANGE;
            goto error;

        } else if (mCallbackFrames < 0) {
            ALOGE("AudioStreamInternal::open(): framesPerCallback negative");
                mServiceInterface.closeStream(mServiceStreamHandle);
                return AAUDIO_ERROR_OUT_OF_RANGE;
            result = AAUDIO_ERROR_OUT_OF_RANGE;
            goto error;

        }
        if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
@@ -185,11 +196,15 @@ aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
    }

    setState(AAUDIO_STREAM_STATE_OPEN);
        // only connect to AudioManager if this is a playback stream running in client process
    // Only connect to AudioManager if this is a playback stream running in client process.
    if (!mInService && getDirection() == AAUDIO_DIRECTION_OUTPUT) {
        init(android::PLAYER_TYPE_AAUDIO, AUDIO_USAGE_MEDIA);
    }
    }

    return result;

error:
    close();
    return result;
}

@@ -224,7 +239,6 @@ aaudio_result_t AudioStreamInternal::close() {
    }
}


static void *aaudio_callback_thread_proc(void *context)
{
    AudioStreamInternal *stream = (AudioStreamInternal *)context;