Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 15959400 authored by Android Build Coastguard Worker's avatar Android Build Coastguard Worker
Browse files

Snap for 12918425 from 2b9db132 to 25Q2-release

Change-Id: Ifb87f33e9b1d92b8066e903206f71aced86e125d
parents 86305ab2 2b9db132
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -119,7 +119,6 @@ cc_library_shared {

cc_library_shared {
    name: "libcamera2ndk_vendor",
    cpp_std: "gnu++17",
    vendor: true,
    srcs: [
        "NdkCameraCaptureSession.cpp",
+6 −1
Original line number Diff line number Diff line
@@ -119,7 +119,8 @@ public:
    aaudio_result_t open(const AAudioParameters &parameters,
                         AAudioStream_dataCallback dataCallback = nullptr,
                         AAudioStream_errorCallback errorCallback = nullptr,
                         void *userContext = nullptr) {
                         void *userContext = nullptr,
                         AAudioStream_presentationEndCallback presentationEndCallback = nullptr) {
        aaudio_result_t result = AAUDIO_OK;

        // Use an AAudioStreamBuilder to contain requested parameters.
@@ -137,6 +138,10 @@ public:
        if (errorCallback != nullptr) {
            AAudioStreamBuilder_setErrorCallback(builder, errorCallback, userContext);
        }
        if (presentationEndCallback != nullptr) {
            AAudioStreamBuilder_setPresentationEndCallback(
                    builder, presentationEndCallback, userContext);
        }
        //AAudioStreamBuilder_setFramesPerDataCallback(builder, CALLBACK_SIZE_FRAMES);
        //AAudioStreamBuilder_setBufferCapacityInFrames(builder, 48 * 8);

+3 −0
Original line number Diff line number Diff line
@@ -131,6 +131,9 @@ aaudio_result_t AudioStreamTrack::open(const AudioStreamBuilder& builder)
            // that is some multiple of the burst size.
            notificationFrames = 0 - DEFAULT_BURSTS_PER_BUFFER_CAPACITY;
        }
    } else if (getPerformanceMode() == AAUDIO_PERFORMANCE_MODE_POWER_SAVING_OFFLOADED) {
        streamTransferType = AudioTrack::transfer_type::TRANSFER_SYNC_NOTIF_CALLBACK;
        callback = wp<AudioTrack::IAudioTrackCallback>::fromExisting(this);
    }
    mCallbackBufferSize = builder.getFramesPerDataCallback();

+94 −21
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@

// PCM offload

#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
@@ -35,17 +36,23 @@ aaudio_data_callback_result_t MyDatacallback(AAudioStream* stream,

void MyErrorCallback(AAudioStream* /*stream*/, void* /*userData*/, aaudio_result_t error);

void MyPresentationEndCallback(AAudioStream* /*stream*/, void* userData);

class OffloadPlayer : public AAudioSimplePlayer {
public:
    OffloadPlayer(AAudioArgsParser& argParser, int delay, int padding, int streamFrames)
            : mArgParser(argParser), mDelay(delay), mPadding(padding), mStreamFrames(streamFrames) {
    OffloadPlayer(AAudioArgsParser& argParser, int delay, int padding, int streamFrames,
                  bool useDataCallback)
            : mArgParser(argParser), mDelay(delay), mPadding(padding), mStreamFrames(streamFrames),
              mUseDataCallback(useDataCallback) {
    }

    aaudio_result_t open() {
        aaudio_result_t result = AAudioSimplePlayer::open(mArgParser,
                                                          &MyDatacallback,
        aaudio_result_t result = AAudioSimplePlayer::open(
                mArgParser,
                mUseDataCallback ? &MyDatacallback : nullptr,
                &MyErrorCallback,
                                                          this);
                this,
                &MyPresentationEndCallback);
        if (result != AAUDIO_OK) {
            return result;
        }
@@ -62,20 +69,7 @@ public:
                                              void* audioData,
                                              int32_t numFrames) {
        // Just handle PCM_16 and PCM_FLOAT for testing
        switch (AAudioStream_getFormat(stream)) {
            case AAUDIO_FORMAT_PCM_I16: {
                int16_t *audioBuffer = static_cast<int16_t *>(audioData);
                for (int i = 0; i < mChannelCount; ++i) {
                    mSines[i].render(&audioBuffer[i], mChannelCount, numFrames);
                }
            } break;
            case AAUDIO_FORMAT_PCM_FLOAT: {
                float *audioBuffer = static_cast<float *>(audioData);
                for (int i = 0; i < mChannelCount; ++i) {
                    mSines[i].render(&audioBuffer[i], mChannelCount, numFrames);
                }
            } break;
            default:
        if (!fillData(stream, audioData, numFrames)) {
            return AAUDIO_CALLBACK_RESULT_STOP;
        }
        mFramesWritten += numFrames;
@@ -90,11 +84,78 @@ public:
        return AAUDIO_CALLBACK_RESULT_CONTINUE;
    }

    // Invoked (via MyPresentationEndCallback) when the stream has finished
    // presenting all queued data. In blocking-write mode there is no data
    // callback refilling the stream, so queue the next batch from here.
    void presentationEnd(AAudioStream* stream) {
        printf("Presentation end\n");
        if (mUseDataCallback) {
            return;
        }
        writeAllStreamData(stream);
    }

    // Blocking-write entry point used by main() when the data callback is
    // disabled (-B): renders the whole test signal and pushes it to the
    // stream with AAudioStream_write, then signals offload end-of-stream.
    void writeData() {
        writeAllStreamData(getStream());
    }

private:
    // Renders mStreamFrames of test signal into a staging buffer, pushes it
    // to the stream with blocking AAudioStream_write calls, and finally calls
    // setOffloadEndOfStream() so a presentation-end callback will fire.
    // Unsupported sample formats are reported and the write is skipped.
    void writeAllStreamData(AAudioStream* stream) {
        int bytesPerFrame = mChannelCount;
        switch (AAudioStream_getFormat(stream)) {
            case AAUDIO_FORMAT_PCM_I16:
                bytesPerFrame *= sizeof(int16_t);
                break;
            case AAUDIO_FORMAT_PCM_FLOAT:
                bytesPerFrame *= sizeof(float);
                break;
            default:
                printf("Unsupported format %d\n", AAudioStream_getFormat(stream));
                return;
        }
        // Sole owner of the staging buffer: unique_ptr expresses the
        // ownership and, unlike the std::make_shared<T[]> array overload
        // (C++20-only), std::make_unique<T[]> is available in C++14/17.
        auto data = std::make_unique<uint8_t[]>(bytesPerFrame * mStreamFrames);
        fillData(stream, static_cast<void*>(data.get()), mStreamFrames);
        int bytesWritten = 0;
        int framesLeft = mStreamFrames;
        while (framesLeft > 0) {
            auto framesWritten = AAudioStream_write(
                    stream, static_cast<void *>(&data[bytesWritten]),
                    framesLeft, NANOS_PER_SECOND);
            if (framesWritten < 0) {
                printf("Failed to write data %d\n", framesWritten);
                return;
            }
            printf("Write data succeed, frames=%d\n", framesWritten);
            framesLeft -= framesWritten;
            bytesWritten += framesWritten * bytesPerFrame;
        }
        if (auto result = setOffloadEndOfStream(); result != AAUDIO_OK) {
            printf("Failed to set offload end of stream, result=%d\n", result);
        }
    }

    // Renders numFrames of sine data into `data`, interleaved across
    // mChannelCount channels. Returns false for any sample format other
    // than PCM_I16 / PCM_FLOAT, leaving the buffer untouched.
    bool fillData(AAudioStream* stream, void* data, int numFrames) {
        const auto format = AAudioStream_getFormat(stream);
        if (format == AAUDIO_FORMAT_PCM_I16) {
            int16_t* samples = static_cast<int16_t*>(data);
            for (int channel = 0; channel < mChannelCount; ++channel) {
                mSines[channel].render(&samples[channel], mChannelCount, numFrames);
            }
            return true;
        }
        if (format == AAUDIO_FORMAT_PCM_FLOAT) {
            float* samples = static_cast<float*>(data);
            for (int channel = 0; channel < mChannelCount; ++channel) {
                mSines[channel].render(&samples[channel], mChannelCount, numFrames);
            }
            return true;
        }
        return false;
    }

    const AAudioArgsParser mArgParser;
    const int mDelay;
    const int mPadding;
    const int mStreamFrames;
    const bool mUseDataCallback;

    int mChannelCount;
    std::vector<SineGenerator> mSines;
@@ -113,12 +174,18 @@ void MyErrorCallback(AAudioStream* /*stream*/, void* /*userData*/, aaudio_result
    printf("Error callback, error=%d\n", error);
}

// Trampoline from the C-style AAudio callback into the player object.
// `userData` is the OffloadPlayer* registered when the stream was opened.
void MyPresentationEndCallback(AAudioStream* stream, void* userData) {
    static_cast<OffloadPlayer*>(userData)->presentationEnd(stream);
}

// Prints the tool-specific flags after the common AAudioArgsParser options.
static void usage() {
    AAudioArgsParser::usage();
    printf("      -D{delay} offload delay in frames\n"
           "      -P{padding} offload padding in frames\n"
           "      -E{frames} frames to notify end of stream\n"
           "      -T{seconds} time to run the test\n"
           "      -B use blocking write instead of data callback\n");
}

int main(int argc, char **argv) {
@@ -127,6 +194,7 @@ int main(int argc, char **argv) {
    int padding = 0;
    int streamFrames = 0;
    int timeToRun = DEFAULT_TIME_TO_RUN_IN_SECOND;
    bool useDataCallback = true;
    for (int i = 1; i < argc; ++i) {
        const char *arg = argv[i];
        if (argParser.parseArg(arg)) {
@@ -145,6 +213,9 @@ int main(int argc, char **argv) {
                    case 'T':
                        timeToRun = atoi(&arg[2]);
                        break;
                    case 'B':
                        useDataCallback = false;
                        break;
                    default:
                        usage();
                        exit(EXIT_FAILURE);
@@ -159,7 +230,7 @@ int main(int argc, char **argv) {
    // Force to use offload mode
    argParser.setPerformanceMode(AAUDIO_PERFORMANCE_MODE_POWER_SAVING_OFFLOADED);

    OffloadPlayer player(argParser, delay, padding, streamFrames);
    OffloadPlayer player(argParser, delay, padding, streamFrames, useDataCallback);
    if (auto result = player.open(); result != AAUDIO_OK) {
        printf("Failed to open stream, error=%d\n", result);
        exit(EXIT_FAILURE);
@@ -172,6 +243,8 @@ int main(int argc, char **argv) {
    if (auto result = player.start(); result != AAUDIO_OK) {
        printf("Failed to start stream, error=%d", result);
        exit(EXIT_FAILURE);
    } else if (!useDataCallback) {
        player.writeData();
    }

    sleep(timeToRun);
+2 −0
Original line number Diff line number Diff line
@@ -55,6 +55,8 @@ static const std::vector<Feature> DECODER_FEATURES = {
    Feature(FEATURE_MultipleFrames,   (1 << 5), false),
    Feature(FEATURE_DynamicTimestamp, (1 << 6), false),
    Feature(FEATURE_LowLatency,       (1 << 7), true),
    Feature(FEATURE_DynamicColorAspects, (1 << 8), true),
    Feature(FEATURE_DetachedSurface,     (1 << 9), true),
    // feature to exclude codec from REGULAR codec list
    Feature(FEATURE_SpecialCodec,     (1 << 30), false, true),
};
Loading