Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4d464bd3 authored by jiabin's avatar jiabin Committed by Android Build Cherrypicker Worker
Browse files

Always set AudioTrack callback if it is offload playback.

If the client is not using a data callback, AAudio should also set the
AudioTrack callback if it is an offload playback. The reason is that the
framework will need to notify the client of stream end when doing
gapless offload.

Bug: 388969394
Test: test_pcm_offload -c2 -po -f1 -r48000 -T40 -E960000 -B
Flag: EXEMPT bugfix
(cherry picked from https://googleplex-android-review.googlesource.com/q/commit:1ce42114f26a546553de35ac8264a4c45bb4ebc7)
Merged-In: Ia55f162f2888b3a1b7751cc4e4bb3e198979a801
Change-Id: Ia55f162f2888b3a1b7751cc4e4bb3e198979a801
parent 8ff1b5e2
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -119,7 +119,8 @@ public:
    aaudio_result_t open(const AAudioParameters &parameters,
                         AAudioStream_dataCallback dataCallback = nullptr,
                         AAudioStream_errorCallback errorCallback = nullptr,
                         void *userContext = nullptr) {
                         void *userContext = nullptr,
                         AAudioStream_presentationEndCallback presentationEndCallback = nullptr) {
        aaudio_result_t result = AAUDIO_OK;

        // Use an AAudioStreamBuilder to contain requested parameters.
@@ -137,6 +138,10 @@ public:
        if (errorCallback != nullptr) {
            AAudioStreamBuilder_setErrorCallback(builder, errorCallback, userContext);
        }
        if (presentationEndCallback != nullptr) {
            AAudioStreamBuilder_setPresentationEndCallback(
                    builder, presentationEndCallback, userContext);
        }
        //AAudioStreamBuilder_setFramesPerDataCallback(builder, CALLBACK_SIZE_FRAMES);
        //AAudioStreamBuilder_setBufferCapacityInFrames(builder, 48 * 8);

+3 −0
Original line number Diff line number Diff line
@@ -130,6 +130,9 @@ aaudio_result_t AudioStreamTrack::open(const AudioStreamBuilder& builder)
            // that is some multiple of the burst size.
            notificationFrames = 0 - DEFAULT_BURSTS_PER_BUFFER_CAPACITY;
        }
    } else if (getPerformanceMode() == AAUDIO_PERFORMANCE_MODE_POWER_SAVING_OFFLOADED) {
        streamTransferType = AudioTrack::transfer_type::TRANSFER_SYNC_NOTIF_CALLBACK;
        callback = wp<AudioTrack::IAudioTrackCallback>::fromExisting(this);
    }
    mCallbackBufferSize = builder.getFramesPerDataCallback();

+94 −21
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@

// PCM offload

#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
@@ -35,17 +36,23 @@ aaudio_data_callback_result_t MyDatacallback(AAudioStream* stream,

void MyErrorCallback(AAudioStream* /*stream*/, void* /*userData*/, aaudio_result_t error);

void MyPresentationEndCallback(AAudioStream* /*stream*/, void* userData);

class OffloadPlayer : public AAudioSimplePlayer {
public:
    OffloadPlayer(AAudioArgsParser& argParser, int delay, int padding, int streamFrames)
            : mArgParser(argParser), mDelay(delay), mPadding(padding), mStreamFrames(streamFrames) {
    OffloadPlayer(AAudioArgsParser& argParser, int delay, int padding, int streamFrames,
                  bool useDataCallback)
            : mArgParser(argParser), mDelay(delay), mPadding(padding), mStreamFrames(streamFrames),
              mUseDataCallback(useDataCallback) {
    }

    aaudio_result_t open() {
        aaudio_result_t result = AAudioSimplePlayer::open(mArgParser,
                                                          &MyDatacallback,
        aaudio_result_t result = AAudioSimplePlayer::open(
                mArgParser,
                mUseDataCallback ? &MyDatacallback : nullptr,
                &MyErrorCallback,
                                                          this);
                this,
                &MyPresentationEndCallback);
        if (result != AAUDIO_OK) {
            return result;
        }
@@ -62,20 +69,7 @@ public:
                                              void* audioData,
                                              int32_t numFrames) {
        // Just handle PCM_16 and PCM_FLOAT for testing
        switch (AAudioStream_getFormat(stream)) {
            case AAUDIO_FORMAT_PCM_I16: {
                int16_t *audioBuffer = static_cast<int16_t *>(audioData);
                for (int i = 0; i < mChannelCount; ++i) {
                    mSines[i].render(&audioBuffer[i], mChannelCount, numFrames);
                }
            } break;
            case AAUDIO_FORMAT_PCM_FLOAT: {
                float *audioBuffer = static_cast<float *>(audioData);
                for (int i = 0; i < mChannelCount; ++i) {
                    mSines[i].render(&audioBuffer[i], mChannelCount, numFrames);
                }
            } break;
            default:
        if (!fillData(stream, audioData, numFrames)) {
            return AAUDIO_CALLBACK_RESULT_STOP;
        }
        mFramesWritten += numFrames;
@@ -90,11 +84,78 @@ public:
        return AAUDIO_CALLBACK_RESULT_CONTINUE;
    }

    // Invoked when the framework reports that all queued offload data has
    // been presented. In blocking-write mode this is where we queue the next
    // run of stream data; in data-callback mode the callback keeps feeding
    // audio, so there is nothing further to do.
    void presentationEnd(AAudioStream* stream) {
        printf("Presentation end\n");
        if (mUseDataCallback) {
            return;
        }
        writeAllStreamData(stream);
    }

    void writeData() {
        writeAllStreamData(getStream());
    }

private:
    // Writes mStreamFrames frames of generated sine data to `stream` with
    // blocking writes, then requests offload end-of-stream so the framework
    // can fire the presentation-end callback (needed for gapless offload).
    // Unsupported formats and write failures are reported and abandoned.
    void writeAllStreamData(AAudioStream* stream) {
        int bytesPerFrame = mChannelCount;
        switch (AAudioStream_getFormat(stream)) {
            case AAUDIO_FORMAT_PCM_I16: {
                bytesPerFrame *= 2;  // sizeof(int16_t)
            } break;
            case AAUDIO_FORMAT_PCM_FLOAT: {
                bytesPerFrame *= 4;  // sizeof(float)
            } break;
            default:
                printf("Unsupported format %d\n", AAudioStream_getFormat(stream));
                return;
        }
        // Local scratch buffer: no shared ownership is needed, so use a
        // std::vector instead of shared_ptr<uint8_t[]> (whose make_shared
        // array form also requires C++20). Handles mStreamFrames == 0 safely.
        std::vector<uint8_t> data(static_cast<size_t>(bytesPerFrame) * mStreamFrames);
        fillData(stream, static_cast<void*>(data.data()), mStreamFrames);
        int bytesWritten = 0;
        int framesLeft = mStreamFrames;
        while (framesLeft > 0) {
            auto framesWritten = AAudioStream_write(
                    stream, static_cast<void *>(&data[bytesWritten]),
                    framesLeft, NANOS_PER_SECOND);
            if (framesWritten < 0) {
                // Negative return is an aaudio_result_t error code.
                printf("Failed to write data %d\n", framesWritten);
                return;
            }
            printf("Write data succeed, frames=%d\n", framesWritten);
            framesLeft -= framesWritten;
            bytesWritten += framesWritten * bytesPerFrame;
        }
        if (auto result = setOffloadEndOfStream(); result != AAUDIO_OK) {
            printf("Failed to set offload end of stream, result=%d\n", result);
        }
    }

    // Renders one sine tone per channel, interleaved, into `data` for
    // `numFrames` frames. Returns false when the stream format is neither
    // PCM_I16 nor PCM_FLOAT.
    bool fillData(AAudioStream* stream, void* data, int numFrames) {
        // The per-channel render loop is identical for both sample types.
        auto renderAllChannels = [&](auto* samples) {
            for (int channel = 0; channel < mChannelCount; ++channel) {
                mSines[channel].render(&samples[channel], mChannelCount, numFrames);
            }
        };
        const auto format = AAudioStream_getFormat(stream);
        if (format == AAUDIO_FORMAT_PCM_I16) {
            renderAllChannels(static_cast<int16_t*>(data));
        } else if (format == AAUDIO_FORMAT_PCM_FLOAT) {
            renderAllChannels(static_cast<float*>(data));
        } else {
            return false;
        }
        return true;
    }

    const AAudioArgsParser mArgParser;  // parsed command-line stream parameters
    const int mDelay;                   // offload delay in frames (-D option)
    const int mPadding;                 // offload padding in frames (-P option)
    const int mStreamFrames;            // frames to write before signaling end of stream (-E option)
    const bool mUseDataCallback;        // false => blocking writes instead of data callback (-B option)

    // NOTE(review): assigned outside this view (presumably in open()) — confirm.
    int mChannelCount;
    std::vector<SineGenerator> mSines;  // one sine generator per channel
@@ -113,12 +174,18 @@ void MyErrorCallback(AAudioStream* /*stream*/, void* /*userData*/, aaudio_result
    printf("Error callback, error=%d\n", error);
}

// Trampoline from the C presentation-end callback into
// OffloadPlayer::presentationEnd(). `userData` is the OffloadPlayer*
// registered when the stream was opened.
void MyPresentationEndCallback(AAudioStream* stream, void* userData) {
    OffloadPlayer* player = static_cast<OffloadPlayer*>(userData);
    // Plain call: returning a void expression from a void function is legal
    // but non-idiomatic.
    player->presentationEnd(stream);
}

// Prints the common AAudio argument help followed by the options specific to
// this offload test.
static void usage() {
    AAudioArgsParser::usage();
    static const char* const kExtraOptions[] = {
            "      -D{delay} offload delay in frames\n",
            "      -P{padding} offload padding in frames\n",
            "      -E{frames} frames to notify end of stream\n",
            "      -T{seconds} time to run the test\n",
            "      -B use blocking write instead of data callback\n",
    };
    for (const char* const line : kExtraOptions) {
        printf("%s", line);
    }
}

int main(int argc, char **argv) {
@@ -127,6 +194,7 @@ int main(int argc, char **argv) {
    int padding = 0;
    int streamFrames = 0;
    int timeToRun = DEFAULT_TIME_TO_RUN_IN_SECOND;
    bool useDataCallback = true;
    for (int i = 1; i < argc; ++i) {
        const char *arg = argv[i];
        if (argParser.parseArg(arg)) {
@@ -145,6 +213,9 @@ int main(int argc, char **argv) {
                    case 'T':
                        timeToRun = atoi(&arg[2]);
                        break;
                    case 'B':
                        useDataCallback = false;
                        break;
                    default:
                        usage();
                        exit(EXIT_FAILURE);
@@ -159,7 +230,7 @@ int main(int argc, char **argv) {
    // Force to use offload mode
    argParser.setPerformanceMode(AAUDIO_PERFORMANCE_MODE_POWER_SAVING_OFFLOADED);

    OffloadPlayer player(argParser, delay, padding, streamFrames);
    OffloadPlayer player(argParser, delay, padding, streamFrames, useDataCallback);
    if (auto result = player.open(); result != AAUDIO_OK) {
        printf("Failed to open stream, error=%d\n", result);
        exit(EXIT_FAILURE);
@@ -172,6 +243,8 @@ int main(int argc, char **argv) {
    if (auto result = player.start(); result != AAUDIO_OK) {
        printf("Failed to start stream, error=%d", result);
        exit(EXIT_FAILURE);
    } else if (!useDataCallback) {
        player.writeData();
    }

    sleep(timeToRun);