Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cc840346 authored by Phil Burk's avatar Phil Burk Committed by Android (Google) Code Review
Browse files

Merge changes from topic "aaudio_timestamps" into oc-mr1-dev

* changes:
  aaudio: fix timestamps and underruns
  aaudio: prevent retrograde motion in Legacy timestamps
  aaudio example: print timestamps during callbacks.
  aaudio: test timestamps
parents b3631ec2 bcc36746
Loading
Loading
Loading
Loading
+43 −10
Original line number Diff line number Diff line
@@ -31,18 +31,51 @@
#define NANOS_PER_SECOND      (NANOS_PER_MILLISECOND * 1000)

/**
 * Convert an AAudio sharing mode constant to a printable name.
 *
 * @param mode one of the AAUDIO_SHARING_MODE_* constants
 * @return static string such as "EXCLUSIVE" or "SHARED"; "unknown" for
 *         any unrecognized value (never NULL, safe to pass to printf)
 */
const char *getSharingModeText(aaudio_sharing_mode_t mode) {
    // NOTE(review): the diff-merged original declared both `modeText` and
    // `text` and had two return statements (the second unreachable).
    // Keep only the post-change form so this matches the sibling helpers
    // getPerformanceModeText() and getDirectionText().
    const char *text = "unknown";
    switch (mode) {
        case AAUDIO_SHARING_MODE_EXCLUSIVE:
            text = "EXCLUSIVE";
            break;
        case AAUDIO_SHARING_MODE_SHARED:
            text = "SHARED";
            break;
        default:
            break;
    }
    return text;
}

/**
 * Convert an AAudio performance mode constant to a printable name.
 *
 * @param mode one of the AAUDIO_PERFORMANCE_MODE_* constants
 * @return static string such as "LOW_LATENCY"; "unknown" for any
 *         unrecognized value (never NULL, safe to pass to printf)
 */
const char *getPerformanceModeText(aaudio_performance_mode_t mode) {
    if (mode == AAUDIO_PERFORMANCE_MODE_NONE) {
        return "NONE";
    }
    if (mode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) {
        return "LOW_LATENCY";
    }
    if (mode == AAUDIO_PERFORMANCE_MODE_POWER_SAVING) {
        return "POWER_SAVING";
    }
    return "unknown";
}

/**
 * Convert an AAudio stream direction constant to a printable name.
 *
 * @param direction AAUDIO_DIRECTION_INPUT or AAUDIO_DIRECTION_OUTPUT
 * @return "INPUT", "OUTPUT", or "unknown" for any other value
 *         (never NULL, safe to pass to printf)
 */
const char *getDirectionText(aaudio_direction_t direction) {
    if (direction == AAUDIO_DIRECTION_INPUT) {
        return "INPUT";
    }
    if (direction == AAUDIO_DIRECTION_OUTPUT) {
        return "OUTPUT";
    }
    return "unknown";
}

static void convertNanosecondsToTimespec(int64_t nanoseconds, struct timespec *time) {
+20 −0
Original line number Diff line number Diff line
@@ -36,6 +36,13 @@
// How long to sleep in a callback to cause an intentional glitch. For testing.
#define FORCED_UNDERRUN_SLEEP_MICROS     (10 * 1000)

#define MAX_TIMESTAMPS   16

// One (frame position, time) pair captured via AAudioStream_getTimestamp()
// with CLOCK_MONOTONIC; collected during callbacks and later printed /
// checked for retrograde motion by the test harness.
typedef struct Timestamp {
    int64_t position;     // stream frame position reported for this timestamp
    int64_t nanoseconds;  // monotonic clock time corresponding to `position`
} Timestamp;

/**
 * Simple wrapper for AAudio that opens an output stream either in callback or blocking write mode.
 */
@@ -227,10 +234,12 @@ typedef struct SineThreadedData_s {

    SineGenerator  sineOsc1;
    SineGenerator  sineOsc2;
    Timestamp      timestamps[MAX_TIMESTAMPS];
    int64_t        framesTotal = 0;
    int64_t        nextFrameToGlitch = FORCED_UNDERRUN_PERIOD_FRAMES;
    int32_t        minNumFrames = INT32_MAX;
    int32_t        maxNumFrames = 0;
    int32_t        timestampCount = 0; // in timestamps

    int            scheduler = 0;
    bool           schedulerChecked = false;
@@ -273,6 +282,17 @@ aaudio_data_callback_result_t SimplePlayerDataCallbackProc(
        sineData->schedulerChecked = true;
    }

    if (sineData->timestampCount < MAX_TIMESTAMPS) {
        Timestamp *timestamp = &sineData->timestamps[sineData->timestampCount];
        aaudio_result_t result = AAudioStream_getTimestamp(stream,
            CLOCK_MONOTONIC, &timestamp->position, &timestamp->nanoseconds);
        if (result == AAUDIO_OK && // valid?
                (sineData->timestampCount == 0 || // first one?
                (timestamp->position != (timestamp - 1)->position))) { // advanced position?
            sineData->timestampCount++; // keep this one
        }
    }

    if (numFrames > sineData->maxNumFrames) {
        sineData->maxNumFrames = numFrames;
    }
+12 −0
Original line number Diff line number Diff line
@@ -120,6 +120,18 @@ static aaudio_result_t testOpenPlayClose(AAudioArgsParser &argParser)
        goto error;
    }

    for (int i = 0; i < myData.timestampCount; i++) {
        Timestamp *timestamp = &myData.timestamps[i];
        bool retro = (i > 0 &&
                      ((timestamp->position < (timestamp - 1)->position)
                       || ((timestamp->nanoseconds < (timestamp - 1)->nanoseconds))));
        const char *message = retro ? "  <= RETROGRADE!" : "";
        printf("Timestamp %3d : %8lld, %8lld %s\n", i,
               (long long) timestamp->position,
               (long long) timestamp->nanoseconds,
               message);
    }

    if (myData.schedulerChecked) {
        printf("scheduler = 0x%08x, SCHED_FIFO = 0x%08X\n",
               myData.scheduler,
+70 −19
Original line number Diff line number Diff line
@@ -66,9 +66,9 @@ AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterfa
        , mStreamVolume(1.0f)
        , mInService(inService)
        , mServiceInterface(serviceInterface)
        , mAtomicTimestamp()
        , mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        , mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        , mAtomicTimestamp()
        {
    ALOGD("AudioStreamInternal(): mWakeupDelayNanos = %d, mMinimumSleepNanos = %d",
          mWakeupDelayNanos, mMinimumSleepNanos);
@@ -250,25 +250,45 @@ static void *aaudio_callback_thread_proc(void *context)
    }
}

/*
 * It normally takes about 20-30 msec to start a stream on the server.
 * But the first time can take as much as 200-300 msec. The HW
 * starts right away so by the time the client gets a chance to write into
 * the buffer, it is already in a deep underflow state. That can cause the
 * XRunCount to be non-zero, which could lead an app to tune its latency higher.
 * To avoid this problem, we set a request for the processing code to start the
 * client stream at the same position as the server stream.
 * The processing code will then save the current offset
 * between client and server and apply that to any position given to the app.
 */
aaudio_result_t AudioStreamInternal::requestStart()
{
    int64_t startTime;
    ALOGD("AudioStreamInternal()::requestStart()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal::requestStart() mServiceStreamHandle invalid");
        ALOGE("requestStart() mServiceStreamHandle invalid");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    if (isActive()) {
        ALOGE("AudioStreamInternal::requestStart() already active");
        ALOGE("requestStart() already active");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    aaudio_stream_state_t originalState = getState();

    aaudio_stream_state_t originalState = getState();
    if (originalState == AAUDIO_STREAM_STATE_DISCONNECTED) {
        ALOGE("requestStart() but DISCONNECTED");
        return AAUDIO_ERROR_DISCONNECTED;
    }
    setState(AAUDIO_STREAM_STATE_STARTING);
    aaudio_result_t result = AAudioConvert_androidToAAudioResult(startWithStatus());

    // Clear any stale timestamps from the previous run.
    drainTimestampsFromService();

    status_t status = startWithStatus(); // Call PlayerBase, which will start the device stream.
    aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);

    startTime = AudioClock::getNanoseconds();
    mClockModel.start(startTime);
    mNeedCatchUp.request();  // Ask data processing code to catch up when first timestamp received.

    if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
        // Launch the callback loop thread.
@@ -314,13 +334,14 @@ aaudio_result_t AudioStreamInternal::stopCallback()
/**
 * Stop the underlying service stream.
 *
 * Stops the clock model, transitions the state machine to STOPPING,
 * invalidates the cached timestamp, and forwards the stop to PlayerBase.
 *
 * @return AAUDIO_OK on success; AAUDIO_ERROR_INVALID_STATE if the
 *         service stream handle is invalid; otherwise the converted
 *         status from stopWithStatus().
 */
aaudio_result_t AudioStreamInternal::requestStopInternal()
{
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        // NOTE(review): the diff-merged original kept both the old and new
        // ALOGE lines, nesting one call inside the other (broken C++).
        // Keep only the post-change message.
        ALOGE("requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
              mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_STOPPING);
    mAtomicTimestamp.clear();  // do not let a stale timestamp survive the stop
    return AAudioConvert_androidToAAudioResult(stopWithStatus());
}

@@ -336,7 +357,7 @@ aaudio_result_t AudioStreamInternal::requestStop()

aaudio_result_t AudioStreamInternal::registerThread() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal::registerThread() mServiceStreamHandle invalid");
        ALOGE("registerThread() mServiceStreamHandle invalid");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.registerAudioThread(mServiceStreamHandle,
@@ -346,7 +367,7 @@ aaudio_result_t AudioStreamInternal::registerThread() {

aaudio_result_t AudioStreamInternal::unregisterThread() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal::unregisterThread() mServiceStreamHandle invalid");
        ALOGE("unregisterThread() mServiceStreamHandle invalid");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, gettid());
@@ -374,13 +395,15 @@ aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
    // Generated in server and passed to client. Return latest.
    if (mAtomicTimestamp.isValid()) {
        Timestamp timestamp = mAtomicTimestamp.read();
        *framePosition = timestamp.getPosition();
        int64_t position = timestamp.getPosition() + mFramesOffsetFromService;
        if (position >= 0) {
            *framePosition = position;
            *timeNanoseconds = timestamp.getNanoseconds();
            return AAUDIO_OK;
    } else {
        return AAUDIO_ERROR_UNAVAILABLE;
        }
    }
    return AAUDIO_ERROR_UNAVAILABLE;
}

aaudio_result_t AudioStreamInternal::updateStateMachine() {
    if (isDataCallbackActive()) {
@@ -394,14 +417,14 @@ void AudioStreamInternal::logTimestamp(AAudioServiceMessage &command) {
    static int64_t oldTime = 0;
    int64_t framePosition = command.timestamp.position;
    int64_t nanoTime = command.timestamp.timestamp;
    ALOGD("AudioStreamInternal: timestamp says framePosition = %08lld at nanoTime %lld",
    ALOGD("logTimestamp: timestamp says framePosition = %8lld at nanoTime %lld",
         (long long) framePosition,
         (long long) nanoTime);
    int64_t nanosDelta = nanoTime - oldTime;
    if (nanosDelta > 0 && oldTime > 0) {
        int64_t framesDelta = framePosition - oldPosition;
        int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
        ALOGD("AudioStreamInternal: framesDelta = %08lld, nanosDelta = %08lld, rate = %lld",
        ALOGD("logTimestamp:     framesDelta = %8lld, nanosDelta = %8lld, rate = %lld",
              (long long) framesDelta, (long long) nanosDelta, (long long) rate);
    }
    oldPosition = framePosition;
@@ -478,6 +501,34 @@ aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *mes
    return result;
}

/**
 * Drain and discard stale messages from the service up-command queue.
 *
 * Timestamp messages left over from a previous run are dropped so they
 * cannot be applied to the new start; EVENT messages are still forwarded
 * to onEventFromServer(). Returns as soon as the queue is empty or an
 * error occurs.
 *
 * @return AAUDIO_OK normally; AAUDIO_ERROR_INTERNAL on an unrecognized
 *         message, or the result of onEventFromServer() on failure.
 */
aaudio_result_t AudioStreamInternal::drainTimestampsFromService() {
    aaudio_result_t result = AAUDIO_OK;
    AAudioServiceMessage message;

    // Keep reading until the queue is empty or something goes wrong.
    while (result == AAUDIO_OK && mAudioEndpoint.readUpCommand(&message) == 1) {
        if (message.what == AAudioServiceMessage::code::TIMESTAMP_SERVICE
                || message.what == AAudioServiceMessage::code::TIMESTAMP_HARDWARE) {
            // Stale timestamps from before the (re)start — drop them.
        } else if (message.what == AAudioServiceMessage::code::EVENT) {
            // Events must still be processed even while draining.
            result = onEventFromServer(&message);
        } else {
            ALOGE("WARNING - drainTimestampsFromService() Unrecognized what = %d",
                  (int) message.what);
            result = AAUDIO_ERROR_INTERNAL;
        }
    }
    return result;
}

// Process all the commands coming from the server.
aaudio_result_t AudioStreamInternal::processCommands() {
    aaudio_result_t result = AAUDIO_OK;
@@ -502,7 +553,7 @@ aaudio_result_t AudioStreamInternal::processCommands() {
            break;

        default:
            ALOGE("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
            ALOGE("WARNING - processCommands() Unrecognized what = %d",
                 (int) message.what);
            result = AAUDIO_ERROR_INTERNAL;
            break;
@@ -613,7 +664,7 @@ aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
    }

    aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
    ALOGD("AudioStreamInternal::setBufferSize() req = %d => %d", requestedFrames, actualFrames);
    ALOGD("setBufferSize() req = %d => %d", requestedFrames, actualFrames);
    if (result < 0) {
        return result;
    } else {
+7 −2
Original line number Diff line number Diff line
@@ -115,12 +115,15 @@ protected:
                            int64_t currentTimeNanos,
                            int64_t *wakeTimePtr) = 0;

    aaudio_result_t drainTimestampsFromService();

    aaudio_result_t processCommands();

    aaudio_result_t requestStopInternal();

    aaudio_result_t stopCallback();

    virtual void advanceClientToMatchServerPosition() = 0;

    virtual void onFlushFromServer() {}

@@ -167,6 +170,10 @@ protected:

    AAudioServiceInterface  &mServiceInterface;   // abstract interface to the service

    SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;

    AtomicRequestor          mNeedCatchUp;   // Ask read() or write() to sync on first timestamp.

private:
    /*
     * Asynchronous write with data conversion.
@@ -188,8 +195,6 @@ private:
    AudioEndpointParcelable  mEndPointParcelable; // description of the buffers filled by service
    EndpointDescriptor       mEndpointDescriptor; // buffer description with resolved addresses

    SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;

    int64_t                  mServiceLatencyNanos = 0;

};
Loading