Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dde8d48a authored by TreeHugger Robot, committed by Android (Google) Code Review
Browse files

Merge "aaudio: improve accuracy of timestamps" into oc-mr1-dev

parents 8b852805 97350f9d
Loading
Loading
Loading
Loading
+36 −3
Original line number Diff line number Diff line
@@ -30,6 +30,8 @@
#define REQUIRED_FORMAT    AAUDIO_FORMAT_PCM_I16
#define MIN_FRAMES_TO_READ 48  /* arbitrary, 1 msec at 48000 Hz */

static const int FRAMES_PER_LINE = 20000;

int main(int argc, const char **argv)
{
    AAudioArgsParser   argParser;
@@ -46,7 +48,10 @@ int main(int argc, const char **argv)
    int32_t framesPerRead = 0;
    int32_t framesToRecord = 0;
    int32_t framesLeft = 0;
    int32_t nextFrameCount = 0;
    int32_t frameCount = 0;
    int32_t xRunCount = 0;
    int64_t previousFramePosition = -1;
    int16_t *data = nullptr;
    float peakLevel = 0.0;
    int loopCounter = 0;
@@ -56,7 +61,7 @@ int main(int argc, const char **argv)
    // in a buffer if we hang or crash.
    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);

    printf("%s - Monitor input level using AAudio\n", argv[0]);
    printf("%s - Monitor input level using AAudio V0.1.1\n", argv[0]);

    argParser.setFormat(REQUIRED_FORMAT);
    if (argParser.parseArgs(argc, argv)) {
@@ -133,6 +138,7 @@ int main(int argc, const char **argv)
            goto finish;
        }
        framesLeft -= actual;
        frameCount += actual;

        // Peak finder.
        for (int frameIndex = 0; frameIndex < actual; frameIndex++) {
@@ -143,9 +149,36 @@ int main(int argc, const char **argv)
        }

        // Display level as stars, eg. "******".
        if ((loopCounter++ % 10) == 0) {
        if (frameCount > nextFrameCount) {
            displayPeakLevel(peakLevel);
            peakLevel = 0.0;
            nextFrameCount += FRAMES_PER_LINE;
        }

        // Print timestamps.
        int64_t framePosition = 0;
        int64_t frameTime = 0;
        aaudio_result_t timeResult;
        timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
                                               &framePosition, &frameTime);

        if (timeResult == AAUDIO_OK) {
            if (framePosition > (previousFramePosition + FRAMES_PER_LINE)) {
                int64_t realTime = getNanoseconds();
                int64_t framesRead = AAudioStream_getFramesRead(aaudioStream);

                double latencyMillis = calculateLatencyMillis(framesRead, realTime,
                                                              framePosition, frameTime,
                                                              actualSampleRate);

                printf("--- timestamp: result = %4d, position = %lld, at %lld nanos"
                               ", latency = %7.2f msec\n",
                       timeResult,
                       (long long) framePosition,
                       (long long) frameTime,
                       latencyMillis);
                previousFramePosition = framePosition;
            }
        }
    }

+22 −2
Original line number Diff line number Diff line
@@ -25,7 +25,7 @@
#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
#define NANOS_PER_SECOND      (NANOS_PER_MILLISECOND * 1000)

static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
const char *getSharingModeText(aaudio_sharing_mode_t mode) {
    const char *modeText = "unknown";
    switch (mode) {
    case AAUDIO_SHARING_MODE_EXCLUSIVE:
@@ -49,7 +49,7 @@ static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
    return (time.tv_sec * NANOS_PER_SECOND) + time.tv_nsec;
}

void displayPeakLevel(float peakLevel) {
static void displayPeakLevel(float peakLevel) {
    printf("%5.3f ", peakLevel);
    const int maxStars = 50; // arbitrary, fits on one line
    int numStars = (int) (peakLevel * maxStars);
@@ -59,4 +59,24 @@ void displayPeakLevel(float peakLevel) {
    printf("\n");
}

/**
 * @param position1 position of hardware frame
 * @param nanoseconds1
 * @param position2 position of client read/write
 * @param nanoseconds2
 * @param sampleRate
 * @return latency in milliseconds
 */
/**
 * Estimate stream latency from two (frame position, time) observations.
 *
 * @param position1 position of a hardware frame
 * @param nanoseconds1 CLOCK time at which position1 was reached
 * @param position2 position of the client read/write pointer
 * @param nanoseconds2 CLOCK time at which position2 was observed
 * @param sampleRate stream sample rate in Hz, used to convert frames to nanos
 * @return latency in milliseconds
 */
static double calculateLatencyMillis(int64_t position1, int64_t nanoseconds1,
                              int64_t position2, int64_t nanoseconds2,
                              int64_t sampleRate) {
    // How long the frames between the two positions take at this sample rate.
    const int64_t framesDelta = position2 - position1;
    const int64_t nanosDelta = NANOS_PER_SECOND * framesDelta / sampleRate;
    // Project when the frame at position2 passes the hardware point, then
    // subtract the time at which the client actually observed position2.
    const int64_t latencyNanos = (nanoseconds1 + nanosDelta) - nanoseconds2;
    return latencyNanos / 1000000.0;
}

#endif // AAUDIO_EXAMPLE_UTILS_H
+2 −2
Original line number Diff line number Diff line
@@ -28,7 +28,6 @@ namespace aaudio {
// Used to send information about the HAL to the client.
struct AAudioMessageTimestamp {
    int64_t position;     // number of frames transferred so far
    int64_t deviceOffset; // add to client position to get device position
    int64_t timestamp;    // time when that position was reached
};

@@ -51,7 +50,8 @@ struct AAudioMessageEvent {
typedef struct AAudioServiceMessage_s {
    enum class code : uint32_t {
        NOTHING,
        TIMESTAMP,
        TIMESTAMP_SERVICE, // when frame is read or written by the service to the client
        TIMESTAMP_HARDWARE, // when frame is at DAC or ADC
        EVENT,
    };

+23 −9
Original line number Diff line number Diff line
@@ -68,6 +68,7 @@ AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterfa
        , mServiceInterface(serviceInterface)
        , mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        , mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        , mAtomicTimestamp()
        {
    ALOGD("AudioStreamInternal(): mWakeupDelayNanos = %d, mMinimumSleepNanos = %d",
          mWakeupDelayNanos, mMinimumSleepNanos);
@@ -351,12 +352,15 @@ aaudio_result_t AudioStreamInternal::stopClient(audio_port_handle_t clientHandle
aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
                           int64_t *framePosition,
                           int64_t *timeNanoseconds) {
    // TODO Generate in server and pass to client. Return latest.
    int64_t time = AudioClock::getNanoseconds();
    *framePosition = mClockModel.convertTimeToPosition(time) + mFramesOffsetFromService;
    // TODO Get a more accurate timestamp from the service. This code just adds a fudge factor.
    *timeNanoseconds = time + (6 * AAUDIO_NANOS_PER_MILLISECOND);
    // Generated in server and passed to client. Return latest.
    if (mAtomicTimestamp.isValid()) {
        Timestamp timestamp = mAtomicTimestamp.read();
        *framePosition = timestamp.getPosition();
        *timeNanoseconds = timestamp.getNanoseconds();
        return AAUDIO_OK;
    } else {
        return AAUDIO_ERROR_UNAVAILABLE;
    }
}

aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
@@ -385,7 +389,7 @@ void AudioStreamInternal::logTimestamp(AAudioServiceMessage &command) {
    oldTime = nanoTime;
}

aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
aaudio_result_t AudioStreamInternal::onTimestampService(AAudioServiceMessage *message) {
#if LOG_TIMESTAMPS
    logTimestamp(*message);
#endif
@@ -393,6 +397,12 @@ aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage
    return AAUDIO_OK;
}

// Handle a TIMESTAMP_HARDWARE message from the service: publish the latest
// hardware (DAC/ADC) position/time pair so getTimestamp() can return it.
aaudio_result_t AudioStreamInternal::onTimestampHardware(AAudioServiceMessage *message) {
    const auto &payload = message->timestamp;
    mAtomicTimestamp.write(Timestamp(payload.position, payload.timestamp));
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
    aaudio_result_t result = AAUDIO_OK;
    switch (message->event.event) {
@@ -456,8 +466,12 @@ aaudio_result_t AudioStreamInternal::processCommands() {
            break; // no command this time, no problem
        }
        switch (message.what) {
        case AAudioServiceMessage::code::TIMESTAMP:
            result = onTimestampFromServer(&message);
        case AAudioServiceMessage::code::TIMESTAMP_SERVICE:
            result = onTimestampService(&message);
            break;

        case AAudioServiceMessage::code::TIMESTAMP_HARDWARE:
            result = onTimestampHardware(&message);
            break;

        case AAudioServiceMessage::code::EVENT:
+8 −1
Original line number Diff line number Diff line
@@ -122,7 +122,9 @@ protected:

    aaudio_result_t onEventFromServer(AAudioServiceMessage *message);

    aaudio_result_t onTimestampFromServer(AAudioServiceMessage *message);
    aaudio_result_t onTimestampService(AAudioServiceMessage *message);

    aaudio_result_t onTimestampHardware(AAudioServiceMessage *message);

    void logTimestamp(AAudioServiceMessage &message);

@@ -181,6 +183,11 @@ private:

    AudioEndpointParcelable  mEndPointParcelable; // description of the buffers filled by service
    EndpointDescriptor       mEndpointDescriptor; // buffer description with resolved addresses

    SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;

    int64_t                  mServiceLatencyNanos = 0;

};

} /* namespace aaudio */
Loading