Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8c382060 authored by Phil Burk, committed by android-build-merger
Browse files

Merge "aaudio: fix SHARED MMAP mode in server plus other bugs" into oc-dev

am: 4aa3bd04

Change-Id: I4bbf45a2232e41a0b8d3ee394e13edc3f1d8c519
parents 9a531d9a 4aa3bd04
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -79,7 +79,7 @@ private:
        }
    }

    double mAmplitude = 0.05;  // unitless scaler
    double mAmplitude = 0.005;  // unitless scaler
    double mPhase = 0.0;
    double mPhaseIncrement = 440 * M_PI * 2 / 48000;
    double mFrameRate = 48000;
+55 −33
Original line number Diff line number Diff line
@@ -23,11 +23,15 @@
#include "SineGenerator.h"

#define SAMPLE_RATE   48000
#define NUM_SECONDS   10
#define NUM_SECONDS   5
#define NANOS_PER_MICROSECOND ((int64_t)1000)
#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
#define NANOS_PER_SECOND      (NANOS_PER_MILLISECOND * 1000)

#define REQUESTED_FORMAT  AAUDIO_FORMAT_PCM_I16
#define REQUESTED_SHARING_MODE  AAUDIO_SHARING_MODE_SHARED
//#define REQUESTED_SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE

static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
    const char *modeText = "unknown";
    switch (mode) {
@@ -63,11 +67,8 @@ int main(int argc, char **argv)
    int actualSamplesPerFrame = 0;
    const int requestedSampleRate = SAMPLE_RATE;
    int actualSampleRate = 0;
    const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM_I16;
    aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_PCM_I16;
    aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_UNSPECIFIED;

    //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
    const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
    aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;

    AAudioStreamBuilder *aaudioBuilder = nullptr;
@@ -79,7 +80,8 @@ int main(int argc, char **argv)
    int32_t  framesToPlay = 0;
    int32_t  framesLeft = 0;
    int32_t  xRunCount = 0;
    int16_t *data = nullptr;
    float   *floatData = nullptr;
    int16_t *shortData = nullptr;

    SineGenerator sineOsc1;
    SineGenerator sineOsc2;
@@ -88,7 +90,7 @@ int main(int argc, char **argv)
    // in a buffer if we hang or crash.
    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);

    printf("%s - Play a sine wave using AAudio\n", argv[0]);
    printf("%s - Play a sine wave using AAudio, Z2\n", argv[0]);

    // Use an AAudioStreamBuilder to contain requested parameters.
    result = AAudio_createStreamBuilder(&aaudioBuilder);
@@ -99,8 +101,8 @@ int main(int argc, char **argv)
    // Request stream properties.
    AAudioStreamBuilder_setSampleRate(aaudioBuilder, requestedSampleRate);
    AAudioStreamBuilder_setSamplesPerFrame(aaudioBuilder, requestedSamplesPerFrame);
    AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
    AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
    AAudioStreamBuilder_setFormat(aaudioBuilder, REQUESTED_FORMAT);
    AAudioStreamBuilder_setSharingMode(aaudioBuilder, REQUESTED_SHARING_MODE);

    // Create an AAudioStream using the Builder.
    result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
@@ -124,15 +126,16 @@ int main(int argc, char **argv)

    actualSharingMode = AAudioStream_getSharingMode(aaudioStream);
    printf("SharingMode: requested = %s, actual = %s\n",
            getSharingModeText(requestedSharingMode),
            getSharingModeText(REQUESTED_SHARING_MODE),
            getSharingModeText(actualSharingMode));

    // This is the number of frames that are read in one chunk by a DMA controller
    // or a DSP or a mixer.
    framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
    printf("DataFormat: framesPerBurst = %d\n",framesPerBurst);
    printf("Buffer: framesPerBurst = %d\n",framesPerBurst);
    printf("Buffer: bufferSize = %d\n", AAudioStream_getBufferSizeInFrames(aaudioStream));
    bufferCapacity = AAudioStream_getBufferCapacityInFrames(aaudioStream);
    printf("DataFormat: bufferCapacity = %d, remainder = %d\n",
    printf("Buffer: bufferCapacity = %d, remainder = %d\n",
           bufferCapacity, bufferCapacity % framesPerBurst);

    // Some DMA might use very short bursts of 16 frames. We don't need to write such small
@@ -144,14 +147,16 @@ int main(int argc, char **argv)
    printf("DataFormat: framesPerWrite = %d\n",framesPerWrite);

    actualDataFormat = AAudioStream_getFormat(aaudioStream);
    printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
    printf("DataFormat: requested = %d, actual = %d\n", REQUESTED_FORMAT, actualDataFormat);
    // TODO handle other data formats

    // Allocate a buffer for the audio data.
    data = new int16_t[framesPerWrite * actualSamplesPerFrame];
    if (data == nullptr) {
        fprintf(stderr, "ERROR - could not allocate data buffer\n");
        result = AAUDIO_ERROR_NO_MEMORY;
    if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
        floatData = new float[framesPerWrite * actualSamplesPerFrame];
    } else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
        shortData = new int16_t[framesPerWrite * actualSamplesPerFrame];
    } else {
        printf("ERROR Unsupported data format!\n");
        goto finish;
    }

@@ -170,26 +175,41 @@ int main(int argc, char **argv)
    framesToPlay = actualSampleRate * NUM_SECONDS;
    framesLeft = framesToPlay;
    while (framesLeft > 0) {

        if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
            // Render sine waves to left and right channels.
            sineOsc1.render(&floatData[0], actualSamplesPerFrame, framesPerWrite);
            if (actualSamplesPerFrame > 1) {
                sineOsc2.render(&floatData[1], actualSamplesPerFrame, framesPerWrite);
            }
        } else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
            // Render sine waves to left and right channels.
        sineOsc1.render(&data[0], actualSamplesPerFrame, framesPerWrite);
            sineOsc1.render(&shortData[0], actualSamplesPerFrame, framesPerWrite);
            if (actualSamplesPerFrame > 1) {
            sineOsc2.render(&data[1], actualSamplesPerFrame, framesPerWrite);
                sineOsc2.render(&shortData[1], actualSamplesPerFrame, framesPerWrite);
            }
        }

        // Write audio data to the stream.
        int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
        int minFrames = (framesToPlay < framesPerWrite) ? framesToPlay : framesPerWrite;
        int actual = AAudioStream_write(aaudioStream, data, minFrames, timeoutNanos);
        int64_t timeoutNanos = 1000 * NANOS_PER_MILLISECOND;
        int32_t minFrames = (framesToPlay < framesPerWrite) ? framesToPlay : framesPerWrite;
        int32_t actual = 0;
        if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
            actual = AAudioStream_write(aaudioStream, floatData, minFrames, timeoutNanos);
        } else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
            actual = AAudioStream_write(aaudioStream, shortData, minFrames, timeoutNanos);
        }
        if (actual < 0) {
            fprintf(stderr, "ERROR - AAudioStream_write() returned %zd\n", actual);
            fprintf(stderr, "ERROR - AAudioStream_write() returned %d\n", actual);
            goto finish;
        } else if (actual == 0) {
            fprintf(stderr, "WARNING - AAudioStream_write() returned %zd\n", actual);
            fprintf(stderr, "WARNING - AAudioStream_write() returned %d\n", actual);
            goto finish;
        }
        framesLeft -= actual;

        // Use timestamp to estimate latency.
        /*
        {
            int64_t presentationFrame;
            int64_t presentationTime;
@@ -208,13 +228,15 @@ int main(int argc, char **argv)
                printf("estimatedLatencyMillis %d\n", (int)estimatedLatencyMillis);
            }
        }
         */
    }

    xRunCount = AAudioStream_getXRunCount(aaudioStream);
    printf("AAudioStream_getXRunCount %d\n", xRunCount);

finish:
    delete[] data;
    delete[] floatData;
    delete[] shortData;
    AAudioStream_close(aaudioStream);
    AAudioStreamBuilder_delete(aaudioBuilder);
    printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+7 −19
Original line number Diff line number Diff line
@@ -31,8 +31,6 @@
//#define SHARING_MODE  AAUDIO_SHARING_MODE_EXCLUSIVE
#define SHARING_MODE  AAUDIO_SHARING_MODE_SHARED

#define  CALLBACK_SIZE_FRAMES    128

// TODO refactor common code into a single SimpleAAudio class
/**
 * Simple wrapper for AAudio that opens a default stream and then calls
@@ -87,8 +85,8 @@ public:
        AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
        AAudioStreamBuilder_setDataCallback(mBuilder, dataProc, userContext);
        AAudioStreamBuilder_setFormat(mBuilder, AAUDIO_FORMAT_PCM_FLOAT);
        AAudioStreamBuilder_setFramesPerDataCallback(mBuilder, CALLBACK_SIZE_FRAMES);
 //       AAudioStreamBuilder_setBufferCapacityInFrames(mBuilder, CALLBACK_SIZE_FRAMES * 4);
 //       AAudioStreamBuilder_setFramesPerDataCallback(mBuilder, CALLBACK_SIZE_FRAMES);
        AAudioStreamBuilder_setBufferCapacityInFrames(mBuilder, 48 * 8);

        // Open an AAudioStream using the Builder.
        result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
@@ -136,7 +134,7 @@ public:
     aaudio_result_t start() {
        aaudio_result_t result = AAudioStream_requestStart(mStream);
        if (result != AAUDIO_OK) {
            fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n",
            printf("ERROR - AAudioStream_requestStart() returned %d %s\n",
                    result, AAudio_convertResultToText(result));
        }
        return result;
@@ -146,7 +144,7 @@ public:
    aaudio_result_t stop() {
        aaudio_result_t result = AAudioStream_requestStop(mStream);
        if (result != AAUDIO_OK) {
            fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n",
            printf("ERROR - AAudioStream_requestStop() returned %d %s\n",
                    result, AAudio_convertResultToText(result));
        }
        int32_t xRunCount = AAudioStream_getXRunCount(mStream);
@@ -169,9 +167,6 @@ private:
// State shared between main() and the AAudio data callback via the
// userData pointer (see MyDataCallbackProc).
typedef struct SineThreadedData_s {
    SineGenerator  sineOsc1;    // sine generator used by the callback
    SineGenerator  sineOsc2;    // second generator (presumably the other channel — confirm in callback)
    // Remove these variables used for testing.
    int32_t        numFrameCounts;   // count of entries recorded in frameCounts[]
    int32_t        frameCounts[MAX_FRAME_COUNT_RECORDS];  // per-callback numFrames log
    int            scheduler;        // scheduler id sampled via sched_getscheduler() in the callback
    bool           schedulerChecked; // true once 'scheduler' has been sampled
} SineThreadedData_t;
@@ -186,10 +181,6 @@ aaudio_data_callback_result_t MyDataCallbackProc(

    SineThreadedData_t *sineData = (SineThreadedData_t *) userData;

    if (sineData->numFrameCounts < MAX_FRAME_COUNT_RECORDS) {
        sineData->frameCounts[sineData->numFrameCounts++] = numFrames;
    }

    if (!sineData->schedulerChecked) {
        sineData->scheduler = sched_getscheduler(gettid());
        sineData->schedulerChecked = true;
@@ -236,11 +227,10 @@ int main(int argc, char **argv)
    // Make printf print immediately so that debug info is not stuck
    // in a buffer if we hang or crash.
    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
    printf("%s - Play a sine sweep using an AAudio callback\n", argv[0]);
    printf("%s - Play a sine sweep using an AAudio callback, Z1\n", argv[0]);

    player.setSharingMode(SHARING_MODE);

    myData.numFrameCounts = 0;
    myData.schedulerChecked = false;

    result = player.open(MyDataCallbackProc, &myData);
@@ -291,19 +281,17 @@ int main(int argc, char **argv)
    }
    printf("Woke up now.\n");

    printf("call stop()\n");
    result = player.stop();
    if (result != AAUDIO_OK) {
        goto error;
    }
    printf("call close()\n");
    result = player.close();
    if (result != AAUDIO_OK) {
        goto error;
    }

    // Report data gathered in the callback.
    for (int i = 0; i < myData.numFrameCounts; i++) {
        printf("numFrames[%4d] = %4d\n", i, myData.frameCounts[i]);
    }
    if (myData.schedulerChecked) {
        printf("scheduler = 0x%08x, SCHED_FIFO = 0x%08X\n",
               myData.scheduler,
+28 −18
Original line number Diff line number Diff line
@@ -75,6 +75,10 @@ static const sp<IAAudioService> getAAudioService() {
    return gAAudioService;
}

// Drop the cached binder proxy (under gServiceLock) so that the next
// getAAudioService() call re-fetches the service. Called after a remote
// call fails with AAUDIO_ERROR_NO_SERVICE (see openStream).
static void dropAAudioService() {
    Mutex::Autolock _l(gServiceLock);
    gAAudioService.clear(); // force a reconnect
}

// Trivial constructor: only runs the AAudioServiceInterface base-class
// initializer. No binder connection is made here; the service proxy is
// looked up per call via getAAudioService().
AAudioBinderClient::AAudioBinderClient()
        : AAudioServiceInterface() {}
@@ -88,14 +92,26 @@ AAudioBinderClient::~AAudioBinderClient() {}
*/
// Open a stream on the AAudio service.
// If the remote call itself reports AAUDIO_ERROR_NO_SERVICE the cached
// proxy is stale (the service died); drop it and retry once so a
// restarted service is picked up transparently.
// Returns a stream handle, or a negative aaudio_result_t on failure.
aaudio_handle_t AAudioBinderClient::openStream(const AAudioStreamRequest &request,
                                               AAudioStreamConfiguration &configurationOutput) {
    // Initialize so we never return an indeterminate value.
    aaudio_handle_t stream = AAUDIO_ERROR_NO_SERVICE;
    for (int i = 0; i < 2; i++) {
        const sp<IAAudioService> &service = getAAudioService();
        if (service == 0) {
            return AAUDIO_ERROR_NO_SERVICE;
        }

        stream = service->openStream(request, configurationOutput);

        if (stream == AAUDIO_ERROR_NO_SERVICE) {
            ALOGE("AAudioBinderClient: lost connection to AAudioService.");
            dropAAudioService(); // force a reconnect
        } else {
            break;
        }
    }
    return stream;
}

// Close the stream identified by streamHandle on the AAudio service.
// Returns AAUDIO_ERROR_NO_SERVICE when no service connection is available.
aaudio_result_t AAudioBinderClient::closeStream(aaudio_handle_t streamHandle) {
    const sp<IAAudioService> &service = getAAudioService();
    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
    return service->closeStream(streamHandle);
}
@@ -106,37 +122,33 @@ aaudio_result_t AAudioBinderClient::closeStream(aaudio_handle_t streamHandle) {
*/
aaudio_result_t AAudioBinderClient::getStreamDescription(aaudio_handle_t streamHandle,
                                                         AudioEndpointParcelable &parcelable) {
    // Ask the service to fill in the endpoint description for this stream,
    // failing fast when no service connection is available.
    const sp<IAAudioService> &service = getAAudioService();
    return (service == 0) ? AAUDIO_ERROR_NO_SERVICE
                          : service->getStreamDescription(streamHandle, parcelable);
}

/**
 * Ask the service to begin moving data for this stream.
 */
aaudio_result_t AAudioBinderClient::startStream(aaudio_handle_t streamHandle) {
    const sp<IAAudioService> &service = getAAudioService();
    return (service == 0) ? AAUDIO_ERROR_NO_SERVICE
                          : service->startStream(streamHandle);
}

/**
* Stop the flow of data such that start() can resume without loss of data.
*/
aaudio_result_t AAudioBinderClient::pauseStream(aaudio_handle_t streamHandle) {
    const sp<IAAudioService> &service = getAAudioService();
    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
    // Fixed copy/paste bug: this previously forwarded to startStream(),
    // which started the stream instead of pausing it.
    return service->pauseStream(streamHandle);
}

aaudio_result_t AAudioBinderClient::stopStream(aaudio_handle_t streamHandle) {
    // Forward the stop request to the service; with no connection there
    // is nothing to stop.
    const sp<IAAudioService> &service = getAAudioService();
    return (service == 0) ? AAUDIO_ERROR_NO_SERVICE
                          : service->stopStream(streamHandle);
}

/**
*  Discard any data held by the underlying HAL or Service.
*/
aaudio_result_t AAudioBinderClient::flushStream(aaudio_handle_t streamHandle) {
    const sp<IAAudioService> &service = getAAudioService();
    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
    // Fixed copy/paste bug: this previously forwarded to startStream()
    // instead of flushing the stream.
    return service->flushStream(streamHandle);
}

/**
@@ -163,5 +175,3 @@ aaudio_result_t AAudioBinderClient::unregisterAudioThread(aaudio_handle_t stream
                                          clientProcessId,
                                          clientThreadId);
}

+2 −0
Original line number Diff line number Diff line
@@ -66,6 +66,8 @@ public:
     */
    aaudio_result_t pauseStream(aaudio_handle_t streamHandle) override;

    aaudio_result_t stopStream(aaudio_handle_t streamHandle) override;

    /**
     *  Discard any data held by the underlying HAL or Service.
     * This is asynchronous. When complete, the service will send a FLUSHED event.
Loading