Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit abc898fa authored by Robert Wu's avatar Robert Wu Committed by Android (Google) Code Review
Browse files

Merge "AAudio: Add sample rate conversion to MMAP"

parents 2c80e912 d559ba53
Loading
Loading
Loading
Loading
+27 −11
Original line number Diff line number Diff line
@@ -39,18 +39,21 @@ using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;

aaudio_result_t AAudioFlowGraph::configure(audio_format_t sourceFormat,
                          int32_t sourceChannelCount,
                          int32_t sourceSampleRate,
                          audio_format_t sinkFormat,
                          int32_t sinkChannelCount,
                          int32_t sinkSampleRate,
                          bool useMonoBlend,
                          bool useVolumeRamps,
                          float audioBalance,
                          bool isExclusive) {
                          aaudio::resampler::MultiChannelResampler::Quality resamplerQuality) {
    FlowGraphPortFloatOutput *lastOutput = nullptr;

    // TODO change back to ALOGD
    ALOGI("%s() source format = 0x%08x, channels = %d, sink format = 0x%08x, channels = %d, "
          "useMonoBlend = %d, audioBalance = %f, isExclusive %d",
          __func__, sourceFormat, sourceChannelCount, sinkFormat, sinkChannelCount,
          useMonoBlend, audioBalance, isExclusive);
    ALOGD("%s() source format = 0x%08x, channels = %d, sample rate = %d, "
          "sink format = 0x%08x, channels = %d, sample rate = %d, "
          "useMonoBlend = %d, audioBalance = %f, useVolumeRamps %d",
          __func__, sourceFormat, sourceChannelCount, sourceSampleRate, sinkFormat,
          sinkChannelCount, sinkSampleRate, useMonoBlend, audioBalance, useVolumeRamps);

    switch (sourceFormat) {
        case AUDIO_FORMAT_PCM_FLOAT:
@@ -85,6 +88,15 @@ aaudio_result_t AAudioFlowGraph::configure(audio_format_t sourceFormat,
        lastOutput = &mLimiter->output;
    }

    if (sourceSampleRate != sinkSampleRate) {
        mResampler.reset(aaudio::resampler::MultiChannelResampler::make(sinkChannelCount,
                sourceSampleRate, sinkSampleRate, resamplerQuality));
        mRateConverter = std::make_unique<SampleRateConverter>(sinkChannelCount,
                                                               *mResampler);
        lastOutput->connect(&mRateConverter->input);
        lastOutput = &mRateConverter->output;
    }

    // Expand the number of channels if required.
    if (sourceChannelCount == 1 && sinkChannelCount > 1) {
        mChannelConverter = std::make_unique<MonoToMultiConverter>(sinkChannelCount);
@@ -95,8 +107,7 @@ aaudio_result_t AAudioFlowGraph::configure(audio_format_t sourceFormat,
        return AAUDIO_ERROR_UNIMPLEMENTED;
    }

    // Apply volume ramps for only exclusive streams.
    if (isExclusive) {
    if (useVolumeRamps) {
        // Apply volume ramps to set the left/right audio balance and target volumes.
        // The signals will be decoupled, volume ramps will be applied, before the signals are
        // combined again.
@@ -137,9 +148,14 @@ aaudio_result_t AAudioFlowGraph::configure(audio_format_t sourceFormat,
    return AAUDIO_OK;
}

void AAudioFlowGraph::process(const void *source, void *destination, int32_t numFrames) {
    mSource->setData(source, numFrames);
    mSink->read(destination, numFrames);
int32_t AAudioFlowGraph::pull(void *destination, int32_t targetFramesToRead) {
    return mSink->read(destination, targetFramesToRead);
}

int32_t AAudioFlowGraph::process(const void *source, int32_t numFramesToWrite, void *destination,
                    int32_t targetFramesToRead) {
    mSource->setData(source, numFramesToWrite);
    return mSink->read(destination, targetFramesToRead);
}

/**
+41 −4
Original line number Diff line number Diff line
@@ -30,6 +30,7 @@
#include <flowgraph/MonoToMultiConverter.h>
#include <flowgraph/MultiToManyConverter.h>
#include <flowgraph/RampLinear.h>
#include <flowgraph/SampleRateConverter.h>

class AAudioFlowGraph {
public:
@@ -38,23 +39,57 @@ public:
     *
     * @param sourceFormat
     * @param sourceChannelCount
     * @param sourceSampleRate
     * @param sinkFormat
     * @param sinkChannelCount
     * @param sinkSampleRate
     * @param useMonoBlend
     * @param useVolumeRamps
     * @param audioBalance
     * @param channelMask
     * @param isExclusive
     * @param resamplerQuality
     * @return
     */
    aaudio_result_t configure(audio_format_t sourceFormat,
                              int32_t sourceChannelCount,
                              int32_t sourceSampleRate,
                              audio_format_t sinkFormat,
                              int32_t sinkChannelCount,
                              int32_t sinkSampleRate,
                              bool useMonoBlend,
                              bool useVolumeRamps,
                              float audioBalance,
                              bool isExclusive);
                              aaudio::resampler::MultiChannelResampler::Quality resamplerQuality);

    void process(const void *source, void *destination, int32_t numFrames);
    /**
     * Attempt to read targetFramesToRead from the flowgraph.
     * This function returns the number of frames actually read.
     *
     * This function does nothing if process() was not called before.
     *
     * @param destination
     * @param targetFramesToRead
     * @return numFramesRead
     */
    int32_t pull(void *destination, int32_t targetFramesToRead);

    /**
     * Set numFramesToWrite frames from the source into the flowgraph.
     * Then, attempt to read targetFramesToRead from the flowgraph.
     * This function returns the number of frames actually read.
     *
     * There may be data still in the flowgraph if targetFramesToRead is not large enough.
     * Before calling process() again, pull() must be called until all the data is consumed.
     *
     * TODO: b/289510598 - Calculate the exact number of input frames needed for Y output frames.
     *
     * @param source
     * @param numFramesToWrite
     * @param destination
     * @param targetFramesToRead
     * @return numFramesRead
     */
    int32_t process(const void *source, int32_t numFramesToWrite, void *destination,
                    int32_t targetFramesToRead);

    /**
     * @param volume between 0.0 and 1.0
@@ -73,6 +108,8 @@ public:

private:
    std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::FlowGraphSourceBuffered> mSource;
    std::unique_ptr<RESAMPLER_OUTER_NAMESPACE::resampler::MultiChannelResampler> mResampler;
    std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::SampleRateConverter> mRateConverter;
    std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::MonoBlend> mMonoBlend;
    std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::Limiter> mLimiter;
    std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::MonoToMultiConverter> mChannelConverter;
+64 −21
Original line number Diff line number Diff line
@@ -63,6 +63,8 @@ using namespace aaudio;

#define LOG_TIMESTAMPS            0

#define ENABLE_SAMPLE_RATE_CONVERTER 1

AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface  &serviceInterface, bool inService)
        : AudioStream()
        , mClockModel()
@@ -179,7 +181,6 @@ aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {

    mDeviceChannelCount = configurationOutput.getSamplesPerFrame();

    setSampleRate(configurationOutput.getSampleRate());
    setDeviceId(configurationOutput.getDeviceId());
    setSessionId(configurationOutput.getSessionId());
    setSharingMode(configurationOutput.getSharingMode());
@@ -190,6 +191,18 @@ aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
    setIsContentSpatialized(configurationOutput.isContentSpatialized());
    setInputPreset(configurationOutput.getInputPreset());

    setDeviceSampleRate(configurationOutput.getSampleRate());

    if (getSampleRate() == AAUDIO_UNSPECIFIED) {
        setSampleRate(configurationOutput.getSampleRate());
    }

#if !ENABLE_SAMPLE_RATE_CONVERTER
    if (getSampleRate() != getDeviceSampleRate()) {
        goto error;
    }
#endif

    // Save device format so we can do format conversion and volume scaling together.
    setDeviceFormat(configurationOutput.getFormat());

@@ -229,39 +242,46 @@ error:
}

aaudio_result_t AudioStreamInternal::configureDataInformation(int32_t callbackFrames) {
    int32_t framesPerHardwareBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
    int32_t deviceFramesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;

    // Scale up the burst size to meet the minimum equivalent in microseconds.
    // This is to avoid waking the CPU too often when the HW burst is very small
    // or at high sample rates.
    int32_t framesPerBurst = framesPerHardwareBurst;
    // or at high sample rates. The actual number of frames that we call back to
    // the app with will be 0 < N <= framesPerBurst so round up the division.
    int32_t framesPerBurst = (static_cast<int64_t>(deviceFramesPerBurst) * getSampleRate() +
             getDeviceSampleRate() - 1) / getDeviceSampleRate();
    int32_t burstMicros = 0;
    const int32_t burstMinMicros = android::AudioSystem::getAAudioHardwareBurstMinUsec();
    do {
        if (burstMicros > 0) {  // skip first loop
            deviceFramesPerBurst *= 2;
            framesPerBurst *= 2;
        }
        burstMicros = framesPerBurst * static_cast<int64_t>(1000000) / getSampleRate();
    } while (burstMicros < burstMinMicros);
    ALOGD("%s() original HW burst = %d, minMicros = %d => SW burst = %d\n",
          __func__, framesPerHardwareBurst, burstMinMicros, framesPerBurst);
          __func__, deviceFramesPerBurst, burstMinMicros, framesPerBurst);

    // Validate final burst size.
    if (framesPerBurst < MIN_FRAMES_PER_BURST || framesPerBurst > MAX_FRAMES_PER_BURST) {
        ALOGE("%s - framesPerBurst out of range = %d", __func__, framesPerBurst);
        return AAUDIO_ERROR_OUT_OF_RANGE;
    }
    setDeviceFramesPerBurst(deviceFramesPerBurst);
    setFramesPerBurst(framesPerBurst); // only save good value

    mBufferCapacityInFrames = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
    mDeviceBufferCapacityInFrames = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;

    mBufferCapacityInFrames = static_cast<int64_t>(mDeviceBufferCapacityInFrames)
            * getSampleRate() / getDeviceSampleRate();
    if (mBufferCapacityInFrames < getFramesPerBurst()
            || mBufferCapacityInFrames > MAX_BUFFER_CAPACITY_IN_FRAMES) {
        ALOGE("%s - bufferCapacity out of range = %d", __func__, mBufferCapacityInFrames);
        return AAUDIO_ERROR_OUT_OF_RANGE;
    }

    mClockModel.setSampleRate(getSampleRate());
    mClockModel.setFramesPerBurst(framesPerHardwareBurst);
    mClockModel.setSampleRate(getDeviceSampleRate());
    mClockModel.setFramesPerBurst(deviceFramesPerBurst);

    if (isDataCallbackSet()) {
        mCallbackFrames = callbackFrames;
@@ -311,7 +331,8 @@ aaudio_result_t AudioStreamInternal::configureDataInformation(int32_t callbackFr
        mTimeOffsetNanos = offsetMicros * AAUDIO_NANOS_PER_MICROSECOND;
    }

    setBufferSize(mBufferCapacityInFrames / 2); // Default buffer size to match Q
    // Default buffer size to match Q
    setBufferSize(mBufferCapacityInFrames / 2);
    return AAUDIO_OK;
}

@@ -370,9 +391,9 @@ aaudio_result_t AudioStreamInternal::exitStandby_l() {
    // Cache the buffer size which may be from client.
    const int32_t previousBufferSize = mBufferSizeInFrames;
    // Copy all available data from current data queue.
    uint8_t buffer[getBufferCapacity() * getBytesPerFrame()];
    android::fifo_frames_t fullFramesAvailable =
            mAudioEndpoint->read(buffer, getBufferCapacity());
    uint8_t buffer[getDeviceBufferCapacity() * getBytesPerFrame()];
    android::fifo_frames_t fullFramesAvailable = mAudioEndpoint->read(buffer,
            getDeviceBufferCapacity());
    mEndPointParcelable.closeDataFileDescriptor();
    aaudio_result_t result = mServiceInterface.exitStandby(
            mServiceStreamHandleInfo, endpointParcelable);
@@ -404,7 +425,7 @@ aaudio_result_t AudioStreamInternal::exitStandby_l() {
        goto exit;
    }
    // Write data from previous data buffer to new endpoint.
    if (android::fifo_frames_t framesWritten =
    if (const android::fifo_frames_t framesWritten =
                mAudioEndpoint->write(buffer, fullFramesAvailable);
            framesWritten != fullFramesAvailable) {
        ALOGW("Some data lost after exiting standby, frames written: %d, "
@@ -444,7 +465,7 @@ aaudio_result_t AudioStreamInternal::requestStart_l()
        ALOGD("requestStart() but DISCONNECTED");
        return AAUDIO_ERROR_DISCONNECTED;
    }
    aaudio_stream_state_t originalState = getState();
    const aaudio_stream_state_t originalState = getState();
    setState(AAUDIO_STREAM_STATE_STARTING);

    // Clear any stale timestamps from the previous run.
@@ -601,7 +622,11 @@ aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t /*clockId*/,
    // Generated in server and passed to client. Return latest.
    if (mAtomicInternalTimestamp.isValid()) {
        Timestamp timestamp = mAtomicInternalTimestamp.read();
        int64_t position = timestamp.getPosition() + mFramesOffsetFromService;
        // This should not overflow as timestamp.getPosition() should be a position in a buffer and
        // not the actual timestamp. timestamp.getNanoseconds() below uses the actual timestamp.
        // At 48000 Hz we can run for over 100 years before overflowing the int64_t.
        int64_t position = (timestamp.getPosition() + mFramesOffsetFromService) * getSampleRate() /
                getDeviceSampleRate();
        if (position >= 0) {
            *framePosition = position;
            *timeNanoseconds = timestamp.getNanoseconds();
@@ -885,7 +910,8 @@ aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
        adjustedFrames = maximumSize;
    } else {
        // Round to the next highest burst size.
        int32_t numBursts = (adjustedFrames + getFramesPerBurst() - 1) / getFramesPerBurst();
        int32_t numBursts = (static_cast<int64_t>(adjustedFrames) + getFramesPerBurst() - 1) /
                getFramesPerBurst();
        adjustedFrames = numBursts * getFramesPerBurst();
        // Clip just in case maximumSize is not a multiple of getFramesPerBurst().
        adjustedFrames = std::min(maximumSize, adjustedFrames);
@@ -893,23 +919,32 @@ aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {

    if (mAudioEndpoint) {
        // Clip against the actual size from the endpoint.
        int32_t actualFrames = 0;
        int32_t actualFramesDevice = 0;
        int32_t maximumFramesDevice = (static_cast<int64_t>(maximumSize) * getDeviceSampleRate()
                + getSampleRate() - 1) / getSampleRate();
        // Set to maximum size so we can write extra data when ready in order to reduce glitches.
        // The amount we keep in the buffer is controlled by mBufferSizeInFrames.
        mAudioEndpoint->setBufferSizeInFrames(maximumSize, &actualFrames);
        mAudioEndpoint->setBufferSizeInFrames(maximumFramesDevice, &actualFramesDevice);
        int32_t actualFrames = (static_cast<int64_t>(actualFramesDevice) * getSampleRate() +
                 getDeviceSampleRate() - 1) / getDeviceSampleRate();
        // actualFrames should be <= actual maximum size of endpoint
        adjustedFrames = std::min(actualFrames, adjustedFrames);
    }

    if (adjustedFrames != mBufferSizeInFrames) {
    const int32_t bufferSizeInFrames = adjustedFrames;
    const int32_t deviceBufferSizeInFrames = static_cast<int64_t>(bufferSizeInFrames) *
            getDeviceSampleRate() / getSampleRate();

    if (deviceBufferSizeInFrames != mDeviceBufferSizeInFrames) {
        android::mediametrics::LogItem(mMetricsId)
                .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETBUFFERSIZE)
                .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, adjustedFrames)
                .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, deviceBufferSizeInFrames)
                .set(AMEDIAMETRICS_PROP_UNDERRUN, (int32_t) getXRunCount())
                .record();
    }

    mBufferSizeInFrames = adjustedFrames;
    mBufferSizeInFrames = bufferSizeInFrames;
    mDeviceBufferSizeInFrames = deviceBufferSizeInFrames;
    ALOGV("%s(%d) returns %d", __func__, requestedFrames, adjustedFrames);
    return (aaudio_result_t) adjustedFrames;
}
@@ -918,10 +953,18 @@ int32_t AudioStreamInternal::getBufferSize() const {
    return mBufferSizeInFrames;
}

int32_t AudioStreamInternal::getDeviceBufferSize() const {
    return mDeviceBufferSizeInFrames;
}

int32_t AudioStreamInternal::getBufferCapacity() const {
    return mBufferCapacityInFrames;
}

int32_t AudioStreamInternal::getDeviceBufferCapacity() const {
    return mDeviceBufferCapacityInFrames;
}

bool AudioStreamInternal::isClockModelInControl() const {
    return isActive() && mAudioEndpoint->isFreeRunning() && mClockModel.isRunning();
}
+10 −2
Original line number Diff line number Diff line
@@ -22,8 +22,9 @@

#include "binding/AudioEndpointParcelable.h"
#include "binding/AAudioServiceInterface.h"
#include "client/IsochronousClockModel.h"
#include "client/AAudioFlowGraph.h"
#include "client/AudioEndpoint.h"
#include "client/IsochronousClockModel.h"
#include "core/AudioStream.h"
#include "utility/AudioClock.h"

@@ -56,8 +57,12 @@ public:

    int32_t getBufferSize() const override;

    int32_t getDeviceBufferSize() const;

    int32_t getBufferCapacity() const override;

    int32_t getDeviceBufferCapacity() const override;

    int32_t getXRunCount() const override {
        return mXRunCount;
    }
@@ -177,6 +182,8 @@ protected:
    int64_t                  mLastFramesWritten = 0;
    int64_t                  mLastFramesRead = 0;

    AAudioFlowGraph          mFlowGraph;

private:
    /*
     * Asynchronous write with data conversion.
@@ -211,8 +218,9 @@ private:
    int32_t                  mDeviceChannelCount = 0;

    int32_t                  mBufferSizeInFrames = 0; // local threshold to control latency
    int32_t                  mDeviceBufferSizeInFrames = 0;
    int32_t                  mBufferCapacityInFrames = 0;

    int32_t                  mDeviceBufferCapacityInFrames = 0;

};

+82 −29
Original line number Diff line number Diff line
@@ -47,6 +47,27 @@ AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &

}

aaudio_result_t AudioStreamInternalCapture::open(const AudioStreamBuilder &builder) {
    aaudio_result_t result = AudioStreamInternal::open(builder);
    if (result == AAUDIO_OK) {
        result = mFlowGraph.configure(getDeviceFormat(),
                             getDeviceChannelCount(),
                             getDeviceSampleRate(),
                             getFormat(),
                             getSamplesPerFrame(),
                             getSampleRate(),
                             getRequireMonoBlend(),
                             false /* useVolumeRamps */,
                             getAudioBalance(),
                             aaudio::resampler::MultiChannelResampler::Quality::Medium);

        if (result != AAUDIO_OK) {
            safeReleaseClose();
        }
    }
    return result;
}

void AudioStreamInternalCapture::advanceClientToMatchServerPosition(int32_t serverMargin) {
    int64_t readCounter = mAudioEndpoint->getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint->getDataWriteCounter() + serverMargin;
@@ -149,7 +170,8 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t
                // Calculate frame position based off of the readCounter because
                // the writeCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint->getDataReadCounter() + getFramesPerBurst();
                const int64_t nextPosition = mAudioEndpoint->getDataReadCounter() +
                        getDeviceFramesPerBurst();
                wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
            }
                break;
@@ -166,42 +188,73 @@ aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t

aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
                                                                int32_t numFrames) {
    // ALOGD("readNowWithConversion(%p, %d)",
    //              buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *destination = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;
    uint8_t *byteBuffer = (uint8_t *) buffer;
    int32_t framesLeftInByteBuffer = numFrames;

    if (framesLeftInByteBuffer > 0) {
        // Pull data from the flowgraph in case there is residual data.
        const int32_t framesActuallyWrittenToByteBuffer = mFlowGraph.pull(
                (void *)byteBuffer,
                framesLeftInByteBuffer);

        const int32_t numBytesActuallyWrittenToByteBuffer =
                framesActuallyWrittenToByteBuffer * getBytesPerFrame();
        byteBuffer += numBytesActuallyWrittenToByteBuffer;
        framesLeftInByteBuffer -= framesActuallyWrittenToByteBuffer;
    }

    mAudioEndpoint->getFullFramesAvailable(&wrappingBuffer);

    // Read data in one or two parts.
    for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
        int32_t framesToProcess = framesLeft;
        const int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable <= 0) break;

        if (framesToProcess > framesAvailable) {
            framesToProcess = framesAvailable;
    // Write data in one or two parts.
    int partIndex = 0;
    int framesReadFromAudioEndpoint = 0;
    while (framesLeftInByteBuffer > 0 && partIndex < WrappingBuffer::SIZE) {
        const int32_t totalFramesInWrappingBuffer = wrappingBuffer.numFrames[partIndex];
        int32_t framesAvailableInWrappingBuffer = totalFramesInWrappingBuffer;
        uint8_t *currentWrappingBuffer = (uint8_t *) wrappingBuffer.data[partIndex];

        // Put data from the wrapping buffer into the flowgraph 8 frames at a time.
        // Continuously pull as much data as possible from the flowgraph into the byte buffer.
        // The return value of mFlowGraph.process is the number of frames actually pulled.
        while (framesAvailableInWrappingBuffer > 0 && framesLeftInByteBuffer > 0) {
            const int32_t framesToReadFromWrappingBuffer = std::min(flowgraph::kDefaultBufferSize,
                    framesAvailableInWrappingBuffer);

            const int32_t numBytesToReadFromWrappingBuffer = getBytesPerDeviceFrame() *
                    framesToReadFromWrappingBuffer;

            // If framesActuallyWrittenToByteBuffer < framesLeftInByteBuffer, it is guaranteed
            // that all the data is pulled. If there is no more space in the byteBuffer, the
            // remaining data will be pulled in the following readNowWithConversion().
            const int32_t framesActuallyWrittenToByteBuffer = mFlowGraph.process(
                    (void *)currentWrappingBuffer,
                    framesToReadFromWrappingBuffer,
                    (void *)byteBuffer,
                    framesLeftInByteBuffer);

            const int32_t numBytesActuallyWrittenToByteBuffer =
                    framesActuallyWrittenToByteBuffer * getBytesPerFrame();
            byteBuffer += numBytesActuallyWrittenToByteBuffer;
            framesLeftInByteBuffer -= framesActuallyWrittenToByteBuffer;
            currentWrappingBuffer += numBytesToReadFromWrappingBuffer;
            framesAvailableInWrappingBuffer -= framesToReadFromWrappingBuffer;

            //ALOGD("%s() numBytesActuallyWrittenToByteBuffer %d, framesLeftInByteBuffer %d"
            //      "framesAvailableInWrappingBuffer %d, framesReadFromAudioEndpoint %d"
            //      , __func__, numBytesActuallyWrittenToByteBuffer, framesLeftInByteBuffer,
            //      framesAvailableInWrappingBuffer, framesReadFromAudioEndpoint);
        }

        const int32_t numBytes = getBytesPerFrame() * framesToProcess;
        const int32_t numSamples = framesToProcess * getSamplesPerFrame();

        const audio_format_t sourceFormat = getDeviceFormat();
        const audio_format_t destinationFormat = getFormat();

        memcpy_by_audio_format(destination, destinationFormat,
                wrappingBuffer.data[partIndex], sourceFormat, numSamples);

        destination += numBytes;
        framesLeft -= framesToProcess;
        framesReadFromAudioEndpoint += totalFramesInWrappingBuffer -
                framesAvailableInWrappingBuffer;
        partIndex++;
    }

    int32_t framesProcessed = numFrames - framesLeft;
    mAudioEndpoint->advanceReadIndex(framesProcessed);
    // The audio endpoint should reference the number of frames written to the wrapping buffer.
    mAudioEndpoint->advanceReadIndex(framesReadFromAudioEndpoint);

    //ALOGD("readNowWithConversion() returns %d", framesProcessed);
    return framesProcessed;
    // The internal code should use the number of frames read from the app.
    return numFrames - framesLeftInByteBuffer;
}

int64_t AudioStreamInternalCapture::getFramesWritten() {
Loading