Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b62c79b1 authored by TreeHugger Robot's avatar TreeHugger Robot Committed by Android (Google) Code Review
Browse files

Merge "aaudio: convert mono output to stereo"

parents 3859e62d 41f19d8e
Loading
Loading
Loading
Loading
+22 −6
Original line number Diff line number Diff line
@@ -104,7 +104,7 @@ aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
    request.setUserId(getuid());
    request.setProcessId(getpid());
    request.setSharingModeMatchRequired(isSharingModeMatchRequired());
    request.setInService(mInService);
    request.setInService(isInService());

    request.getConfiguration().setDeviceId(getDeviceId());
    request.getConfiguration().setSampleRate(getSampleRate());
@@ -118,11 +118,24 @@ aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {

    request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());

    mDeviceChannelCount = getSamplesPerFrame(); // Assume it will be the same. Update if not.

    mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
    if (mServiceStreamHandle < 0
            && request.getConfiguration().getSamplesPerFrame() == 1 // mono?
            && getDirection() == AAUDIO_DIRECTION_OUTPUT
            && !isInService()) {
        // if that failed then try switching from mono to stereo if OUTPUT.
        // Only do this in the client. Otherwise we end up with a mono mixer in the service
        // that writes to a stereo MMAP stream.
        ALOGD("%s - openStream() returned %d, try switching from MONO to STEREO",
              __func__, mServiceStreamHandle);
        request.getConfiguration().setSamplesPerFrame(2); // stereo
        mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
    }
    if (mServiceStreamHandle < 0) {
        result = mServiceStreamHandle;
        ALOGE("%s - openStream() returned %d", __func__, result);
        return result;
        ALOGE("%s - openStream() returned %d", __func__, mServiceStreamHandle);
        return mServiceStreamHandle;
    }

    result = configurationOutput.validate();
@@ -130,8 +143,12 @@ aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
        goto error;
    }
    // Save results of the open.
    setSampleRate(configurationOutput.getSampleRate());
    if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
        setSamplesPerFrame(configurationOutput.getSamplesPerFrame());
    }
    mDeviceChannelCount = configurationOutput.getSamplesPerFrame();

    setSampleRate(configurationOutput.getSampleRate());
    setDeviceId(configurationOutput.getDeviceId());
    setSessionId(configurationOutput.getSessionId());
    setSharingMode(configurationOutput.getSharingMode());
@@ -160,7 +177,6 @@ aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
        goto error;
    }


    // Validate result from server.
    framesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
    if (framesPerBurst < MIN_FRAMES_PER_BURST || framesPerBurst > MAX_FRAMES_PER_BURST) {
+13 −1
Original line number Diff line number Diff line
@@ -138,7 +138,14 @@ protected:
    // Calculate timeout for an operation involving framesPerOperation.
    int64_t calculateReasonableTimeout(int32_t framesPerOperation);

    aaudio_format_t          mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
    aaudio_format_t getDeviceFormat() const { return mDeviceFormat; }

    int32_t getDeviceChannelCount() const { return mDeviceChannelCount; }

    /**
     * @return true if running in audio service, versus in app process
     */
    bool isInService() const { return mInService; }

    IsochronousClockModel    mClockModel;      // timing model for chasing the HAL

@@ -187,6 +194,11 @@ private:
    EndpointDescriptor       mEndpointDescriptor; // buffer description with resolved addresses

    int64_t                  mServiceLatencyNanos = 0;

    // Sometimes the hardware is operating with a different format or channel count from the app.
    // Then we require conversion in AAudio.
    aaudio_format_t          mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
    int32_t                  mDeviceChannelCount = 0;
};

} /* namespace aaudio */
+3 −3
Original line number Diff line number Diff line
@@ -176,16 +176,16 @@ aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
        int32_t numSamples = framesToProcess * getSamplesPerFrame();

        // TODO factor this out into a utility function
        if (mDeviceFormat == getFormat()) {
        if (getDeviceFormat() == getFormat()) {
            memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
        } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16
        } else if (getDeviceFormat() == AAUDIO_FORMAT_PCM_I16
                   && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
            AAudioConvert_pcm16ToFloat(
                    (const int16_t *) wrappingBuffer.data[partIndex],
                    (float *) destination,
                    numSamples,
                    1.0f);
        } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT
        } else if (getDeviceFormat() == AAUDIO_FORMAT_PCM_FLOAT
                   && getFormat() == AAUDIO_FORMAT_PCM_I16) {
            AAudioConvert_floatToPcm16(
                    (const float *) wrappingBuffer.data[partIndex],
+17 −60
Original line number Diff line number Diff line
@@ -206,7 +206,7 @@ aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buff
    // ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
    //              buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    uint8_t *byteBuffer = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);
@@ -220,69 +220,26 @@ aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buff
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }

            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            int32_t numSamples = framesToWrite * getSamplesPerFrame();
            // Data conversion.
            float levelFrom;
            float levelTo;
            bool ramping = mVolumeRamp.nextSegment(framesToWrite, &levelFrom, &levelTo);
            // The formats are validated when the stream is opened so we do not have to
            // check for illegal combinations here.
            // TODO factor this out into a utility function
            if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    AAudio_linearRamp(
                            (const float *) source,
                            (float *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    if (ramping) {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                }
            } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    if (ramping) {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    AAudio_linearRamp(
                            (const int16_t *) source,
                            (int16_t *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                }
            }
            source += numBytes;
            mVolumeRamp.nextSegment(framesToWrite, &levelFrom, &levelTo);

            AAudioDataConverter::FormattedData source(
                    (void *)byteBuffer,
                    getFormat(),
                    getSamplesPerFrame());
            AAudioDataConverter::FormattedData destination(
                    wrappingBuffer.data[partIndex],
                    getDeviceFormat(),
                    getDeviceChannelCount());

            AAudioDataConverter::convert(source, destination, framesToWrite,
                                         levelFrom, levelTo);

            byteBuffer += numBytes;
            framesLeft -= framesToWrite;
        } else {
            break;
+255 −5
Original line number Diff line number Diff line
@@ -27,6 +27,7 @@
#include <aaudio/AAudioTesting.h>
#include <math.h>
#include <system/audio-base.h>
#include <assert.h>

#include "utility/AAudioUtilities.h"

@@ -72,7 +73,7 @@ void AAudioConvert_floatToPcm16(const float *source,
                                int16_t *destination,
                                int32_t numSamples,
                                float amplitude) {
    float scaler = amplitude;
    const float scaler = amplitude;
    for (int i = 0; i < numSamples; i++) {
        float sample = *source++;
        *destination++ = clipAndClampFloatToPcm16(sample, scaler);
@@ -103,7 +104,7 @@ void AAudioConvert_pcm16ToFloat(const int16_t *source,
                                float *destination,
                                int32_t numSamples,
                                float amplitude) {
    float scaler = amplitude / SHORT_SCALE;
    const float scaler = amplitude / SHORT_SCALE;
    for (int i = 0; i < numSamples; i++) {
        destination[i] = source[i] * scaler;
    }
@@ -117,7 +118,7 @@ void AAudioConvert_pcm16ToFloat(const int16_t *source,
                                float amplitude1,
                                float amplitude2) {
    float scaler = amplitude1 / SHORT_SCALE;
    float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
    const float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
            *destination++ = *source++ * scaler;
@@ -134,7 +135,7 @@ void AAudio_linearRamp(const float *source,
                       float amplitude1,
                       float amplitude2) {
    float scaler = amplitude1;
    float delta = (amplitude2 - amplitude1) / numFrames;
    const float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
            float sample = *source++;
@@ -158,7 +159,7 @@ void AAudio_linearRamp(const int16_t *source,
                       float amplitude2) {
    // Because we are converting from int16 to int16, we do not have to scale by 1/32768.
    float scaler = amplitude1;
    float delta = (amplitude2 - amplitude1) / numFrames;
    const float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
            // No need to clip because int16_t range is inherently limited.
@@ -169,6 +170,255 @@ void AAudio_linearRamp(const int16_t *source,
    }
}

// *************************************************************************************
// Convert Mono To Stereo at the same time as converting format.
void AAudioConvert_formatMonoToStereo(const float *source,
                                      int16_t *destination,
                                      int32_t numFrames,
                                      float amplitude) {
    // Fixed-gain path: convert each mono float frame to PCM16 and write the
    // same converted sample into both stereo output channels.
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        const int16_t converted = clipAndClampFloatToPcm16(source[frameIndex], amplitude);
        destination[2 * frameIndex] = converted;       // left
        destination[2 * frameIndex + 1] = converted;   // right
    }
}

void AAudioConvert_formatMonoToStereo(const float *source,
                                      int16_t *destination,
                                      int32_t numFrames,
                                      float amplitude1,
                                      float amplitude2) {
    // Ramped-gain path: the per-frame scaler is interpolated from amplitude1
    // toward amplitude2. Dividing by numFrames means the ramp lands just short
    // of amplitude2, so the next segment can continue from there smoothly.
    const float step = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        const float scaler = amplitude1 + (frameIndex * step);
        const int16_t converted = clipAndClampFloatToPcm16(source[frameIndex], scaler);
        destination[2 * frameIndex] = converted;       // left
        destination[2 * frameIndex + 1] = converted;   // right
    }
}

void AAudioConvert_formatMonoToStereo(const int16_t *source,
                                      float *destination,
                                      int32_t numFrames,
                                      float amplitude) {
    // Fixed-gain path: PCM16 -> normalized float, duplicated into both
    // stereo output channels.
    const float gain = amplitude / SHORT_SCALE;
    float *out = destination;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        const float converted = source[frameIndex] * gain;
        *out++ = converted;   // left
        *out++ = converted;   // right
    }
}

// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudioConvert_formatMonoToStereo(const int16_t *source,
                                      float *destination,
                                      int32_t numFrames,
                                      float amplitude1,
                                      float amplitude2) {
    // Ramped-gain path: per-frame scaler interpolates between the two
    // amplitudes, with the 1/SHORT_SCALE normalization folded into both the
    // starting scaler and the per-frame delta.
    const float startScaler = amplitude1 / SHORT_SCALE;
    const float scalerStep = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        float scaler = startScaler + (frameIndex * scalerStep);
        float converted = source[frameIndex] * scaler;
        destination[2 * frameIndex] = converted;       // left
        destination[2 * frameIndex + 1] = converted;   // right
    }
}

// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudio_linearRampMonoToStereo(const float *source,
                                   float *destination,
                                   int32_t numFrames,
                                   float amplitude1,
                                   float amplitude2) {
    // Float -> float with a linear gain ramp; each mono sample is written to
    // both stereo channels after clipping and scaling.
    const float rampStep = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        float monoSample = source[frameIndex];

        // Clip to valid range of a float sample to prevent excessive volume.
        if (monoSample > MAX_HEADROOM) {
            monoSample = MAX_HEADROOM;
        } else if (monoSample < MIN_HEADROOM) {
            monoSample = MIN_HEADROOM;
        }

        const float scaler = amplitude1 + (frameIndex * rampStep);
        const float scaled = monoSample * scaler;
        destination[2 * frameIndex] = scaled;       // left
        destination[2 * frameIndex + 1] = scaled;   // right
    }
}

// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudio_linearRampMonoToStereo(const int16_t *source,
                                   int16_t *destination,
                                   int32_t numFrames,
                                   float amplitude1,
                                   float amplitude2) {
    // Because we are converting from int16 to int16, we do not have to scale by 1/32768.
    // Dividing by numFrames means the ramp lands just short of amplitude2, so
    // the next ramp segment can continue smoothly from there.
    const float delta = (amplitude2 - amplitude1) / numFrames;
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        const float scaler = amplitude1 + (frameIndex * delta);
        // No need to clip because int16_t range is inherently limited.
        const float sample =  *source++ * scaler;
        int16_t sample16 = (int16_t) roundf(sample);
        // Duplicate the mono sample into both stereo channels.
        *destination++ = sample16;
        *destination++ = sample16;
    }
}

// *************************************************************************************
void AAudioDataConverter::convert(
        const FormattedData &source,
        const FormattedData &destination,
        int32_t numFrames,
        float levelFrom,
        float levelTo) {

    // Mono input feeding a stereo output takes the channel-doubling path.
    const bool monoToStereo = (source.channelCount == 1)
            && (destination.channelCount == 2);
    if (monoToStereo) {
        convertMonoToStereo(source,
                            destination,
                            numFrames,
                            levelFrom,
                            levelTo);
        return;
    }

    // We only support mono to stereo conversion. Otherwise source and destination
    // must match.
    assert(source.channelCount == destination.channelCount);
    convertChannelsMatch(source,
                         destination,
                         numFrames,
                         levelFrom,
                         levelTo);
}

void AAudioDataConverter::convertMonoToStereo(
        const FormattedData &source,
        const FormattedData &destination,
        int32_t numFrames,
        float levelFrom,
        float levelTo) {

    // The formats are validated when the stream is opened so we do not have to
    // check for illegal combinations here.
    const bool ramping = (levelFrom != levelTo);

    if (source.format == AAUDIO_FORMAT_PCM_FLOAT
            && destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
        AAudio_linearRampMonoToStereo(
                (const float *) source.data,
                (float *) destination.data,
                numFrames,
                levelFrom,
                levelTo);
    } else if (source.format == AAUDIO_FORMAT_PCM_FLOAT
            && destination.format == AAUDIO_FORMAT_PCM_I16) {
        if (ramping) {
            AAudioConvert_formatMonoToStereo(
                    (const float *) source.data,
                    (int16_t *) destination.data,
                    numFrames,
                    levelFrom,
                    levelTo);
        } else {
            // Constant level: use the cheaper fixed-gain overload.
            AAudioConvert_formatMonoToStereo(
                    (const float *) source.data,
                    (int16_t *) destination.data,
                    numFrames,
                    levelTo);
        }
    } else if (source.format == AAUDIO_FORMAT_PCM_I16
            && destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
        if (ramping) {
            AAudioConvert_formatMonoToStereo(
                    (const int16_t *) source.data,
                    (float *) destination.data,
                    numFrames,
                    levelFrom,
                    levelTo);
        } else {
            // Constant level: use the cheaper fixed-gain overload.
            AAudioConvert_formatMonoToStereo(
                    (const int16_t *) source.data,
                    (float *) destination.data,
                    numFrames,
                    levelTo);
        }
    } else if (source.format == AAUDIO_FORMAT_PCM_I16
            && destination.format == AAUDIO_FORMAT_PCM_I16) {
        AAudio_linearRampMonoToStereo(
                (const int16_t *) source.data,
                (int16_t *) destination.data,
                numFrames,
                levelFrom,
                levelTo);
    }
}

void AAudioDataConverter::convertChannelsMatch(
        const FormattedData &source,
        const FormattedData &destination,
        int32_t numFrames,
        float levelFrom,
        float levelTo) {

    // The formats are validated when the stream is opened so we do not have to
    // check for illegal combinations here.
    const int32_t numSamples = numFrames * source.channelCount;
    const bool ramping = (levelFrom != levelTo);

    if (source.format == AAUDIO_FORMAT_PCM_FLOAT
            && destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
        AAudio_linearRamp(
                (const float *) source.data,
                (float *) destination.data,
                numFrames,
                source.channelCount,
                levelFrom,
                levelTo);
    } else if (source.format == AAUDIO_FORMAT_PCM_FLOAT
            && destination.format == AAUDIO_FORMAT_PCM_I16) {
        if (ramping) {
            AAudioConvert_floatToPcm16(
                    (const float *) source.data,
                    (int16_t *) destination.data,
                    numFrames,
                    source.channelCount,
                    levelFrom,
                    levelTo);
        } else {
            // Constant level: the fixed-gain overload works on raw samples.
            AAudioConvert_floatToPcm16(
                    (const float *) source.data,
                    (int16_t *) destination.data,
                    numSamples,
                    levelTo);
        }
    } else if (source.format == AAUDIO_FORMAT_PCM_I16
            && destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
        if (ramping) {
            AAudioConvert_pcm16ToFloat(
                    (const int16_t *) source.data,
                    (float *) destination.data,
                    numFrames,
                    source.channelCount,
                    levelFrom,
                    levelTo);
        } else {
            // Constant level: the fixed-gain overload works on raw samples.
            AAudioConvert_pcm16ToFloat(
                    (const int16_t *) source.data,
                    (float *) destination.data,
                    numSamples,
                    levelTo);
        }
    } else if (source.format == AAUDIO_FORMAT_PCM_I16
            && destination.format == AAUDIO_FORMAT_PCM_I16) {
        AAudio_linearRamp(
                (const int16_t *) source.data,
                (int16_t *) destination.data,
                numFrames,
                source.channelCount,
                levelFrom,
                levelTo);
    }
}

status_t AAudioConvert_aaudioToAndroidStatus(aaudio_result_t result) {
    // This covers the case for AAUDIO_OK and for positive results.
    if (result >= 0) {
Loading