Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3f0c902b authored by Andy Hung
Browse files

Add AudioRecord timestamps

Bug: 13569372
Bug: 22886739
Change-Id: Ibc81afefb733d23676a632a0f2da31163fdbe05f
parent 00803f7e
Loading
Loading
Loading
Loading
+17 −0
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@

#include <cutils/sched_policy.h>
#include <media/AudioSystem.h>
#include <media/AudioTimestamp.h>
#include <media/IAudioRecord.h>
#include <media/Modulo.h>
#include <utils/threads.h>
@@ -314,6 +315,17 @@ public:
     */
            status_t    getPosition(uint32_t *position) const;

    /* Return the record timestamp.
     *
     * Parameters:
     *  timestamp: A pointer to the timestamp to be filled.
     *
     * Returned status (from utils/Errors.h) can be:
     *  - NO_ERROR: successful operation
     *  - BAD_VALUE: timestamp is NULL
     */
            status_t getTimestamp(ExtendedTimestamp *timestamp);

    /* Returns a handle on the audio input used by this AudioRecord.
     *
     * Parameters:
@@ -571,6 +583,11 @@ private:
    size_t                  mReqFrameCount;         // frame count to request the first or next time
                                                    // a new IAudioRecord is needed, non-decreasing

    int64_t                 mFramesRead;            // total frames read. reset to zero after
                                                    // the start() following stop(). It is not
                                                    // changed after restoring the track.
    int64_t                 mFramesReadServerOffset; // An offset to server frames read due to
                                                    // restoring AudioRecord, or stop/start.
    // constant after constructor or set()
    uint32_t                mSampleRate;
    audio_format_t          mFormat;
+81 −0
Original line number Diff line number Diff line
@@ -17,6 +17,8 @@
#ifndef ANDROID_AUDIO_TIMESTAMP_H
#define ANDROID_AUDIO_TIMESTAMP_H

#include <string>
#include <sstream>
#include <time.h>

namespace android {
@@ -32,6 +34,85 @@ public:
    struct timespec mTime;     // corresponding CLOCK_MONOTONIC when frame is expected to present
};

// ExtendedTimestamp reports frame position / time pairs captured at several
// stages of the record pipeline (client, server, kernel) so a caller can
// select the most accurate (closest-to-hardware) timestamp available.
struct ExtendedTimestamp {
    enum Location {
        LOCATION_CLIENT,   // timestamp of last read frame from client-server track buffer
        LOCATION_SERVER,   // timestamp of newest frame from client-server track buffer
        LOCATION_KERNEL,   // timestamp of newest frame in the kernel (alsa) buffer.
        LOCATION_MAX       // for sizing arrays only
    };

    // This needs to be kept in sync with android.media.AudioTimestamp
    enum Timebase {
        TIMEBASE_MONOTONIC,  // Clock monotonic offset (generally 0)
        TIMEBASE_BOOTTIME,
        TIMEBASE_MAX,
    };

    ExtendedTimestamp() {
        clear();
    }

    // mPosition is expressed in frame units.
    // It is generally nonnegative, though we keep this signed
    // to potentially express algorithmic latency at the start of the stream
    // and to prevent unintentional unsigned integer underflow.
    int64_t mPosition[LOCATION_MAX];

    // mTimeNs is in nanoseconds for the default timebase, monotonic.
    // If this value is -1, then both time and position are invalid.
    // If this value is 0, then the time is not valid but the position is valid.
    int64_t mTimeNs[LOCATION_MAX];

    // mTimebaseOffset is the offset in ns from monotonic when the
    // timestamp was taken.  This may vary due to suspend time
    // or NTP adjustment.
    int64_t mTimebaseOffset[TIMEBASE_MAX];

    // Reset every stage to the invalid state (mTimeNs == -1) and zero all
    // timebase offsets.  Explicit loops are used instead of memset so this
    // header does not depend on <string.h>, which is not included here.
    void clear() {
        for (int i = 0; i < LOCATION_MAX; ++i) {
            mPosition[i] = 0; // not strictly necessary once mTimeNs[i] == -1
            mTimeNs[i] = -1;  // -1 marks both time and position invalid
        }
        for (int i = 0; i < TIMEBASE_MAX; ++i) {
            mTimebaseOffset[i] = 0;
        }
    }

    // Returns the best timestamp as judged from the closest-to-hw stage in the
    // pipeline with a valid timestamp.
    //
    // Parameters:
    //  position: filled with the frame position of the chosen stage.
    //  time:     filled with the time in ns, adjusted to the given timebase.
    //  timebase: a Timebase enum value selecting the mTimebaseOffset applied.
    //
    // Returns BAD_VALUE for null pointers or an out-of-range timebase, and
    // INVALID_OPERATION when no stage at or past LOCATION_SERVER is valid.
    int getBestTimestamp(int64_t *position, int64_t *time, int timebase) const {
        if (position == nullptr || time == nullptr
                || timebase < 0 || timebase >= TIMEBASE_MAX) {
            return BAD_VALUE;
        }
        // look for the closest-to-hw stage in the pipeline with a valid timestamp.
        // We omit LOCATION_CLIENT as we prefer at least LOCATION_SERVER based accuracy
        // when getting the best timestamp.
        for (int i = LOCATION_MAX - 1; i >= LOCATION_SERVER; --i) {
            if (mTimeNs[i] > 0) {
                *position = mPosition[i];
                *time = mTimeNs[i] + mTimebaseOffset[timebase];
                return OK;
            }
        }
        return INVALID_OPERATION;
    }

    // convert fields to a printable string (one line per pipeline stage)
    std::string toString() const {
        std::stringstream ss;

        ss << "BOOTTIME offset " << mTimebaseOffset[TIMEBASE_BOOTTIME] << "\n";
        for (int i = 0; i < LOCATION_MAX; ++i) {
            ss << "ExtendedTimestamp[" << i << "]  position: "
                    << mPosition[i] << "  time: "  << mTimeNs[i] << "\n";
        }
        return ss.str();
    }
    // TODO:
    // Consider adding buffer status:
    // size, available, algorithmic latency
};

}   // namespace

#endif  // ANDROID_AUDIO_TIMESTAMP_H
+51 −3
Original line number Diff line number Diff line
@@ -26,6 +26,7 @@
#include <utils/RefBase.h>
#include <audio_utils/roundup.h>
#include <media/AudioResamplerPublic.h>
#include <media/AudioTimestamp.h>
#include <media/Modulo.h>
#include <media/SingleStateQueue.h>

@@ -118,6 +119,8 @@ struct AudioTrackSharedStatic {

typedef SingleStateQueue<AudioPlaybackRate> PlaybackRateQueue;

typedef SingleStateQueue<ExtendedTimestamp> ExtendedTimestampQueue;

// ----------------------------------------------------------------------------

// Important: do not add any virtual methods, including ~
@@ -171,6 +174,8 @@ private:

                uint16_t    mPad2;           // unused

                // server write-only, client read
                ExtendedTimestampQueue::Shared mExtendedTimestampQueue;
public:

    volatile    int32_t     mFlags;         // combinations of CBLK_*
@@ -426,8 +431,39 @@ public:
    AudioRecordClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
            size_t frameSize)
        : ClientProxy(cblk, buffers, frameCount, frameSize,
            false /*isOut*/, false /*clientInServer*/) { }
            false /*isOut*/, false /*clientInServer*/)
        , mTimestampObserver(&cblk->mExtendedTimestampQueue) { }
    ~AudioRecordClientProxy() { }

    // Fetch the most recent timestamp pushed by the server.  If the server
    // has not published a newer value, the locally cached copy is returned.
    status_t    getTimestamp(ExtendedTimestamp *timestamp) {
        if (timestamp != nullptr) {
            // Refresh the cached value from the single-state queue; the poll
            // result is ignored because the cache stays valid either way.
            (void) mTimestampObserver.poll(mTimestamp);
            *timestamp = mTimestamp;
            return OK;
        }
        return BAD_VALUE;
    }

    void        clearTimestamp() {
        mTimestamp.clear();
    }

    // Advances the client read pointer to the server write head pointer
    // effectively flushing the client read buffer. The effect is
    // instantaneous. Returns the number of frames flushed.
    uint32_t    flush() {
        // Acquire-load the server's write head so any data published before
        // mRear was advanced is visible before we skip past it.
        int32_t rear = android_atomic_acquire_load(&mCblk->u.mStreaming.mRear);
        int32_t front = mCblk->u.mStreaming.mFront;
        // Release-store the new read position so the server observes it.
        android_atomic_release_store(rear, &mCblk->u.mStreaming.mFront);
        // Positions wrap around int32; Modulo arithmetic yields the count.
        return (Modulo<int32_t>(rear) - front).unsignedValue();
    }

private:
    // The shared buffer contents referred to by the timestamp observer
    // is initialized when the server proxy created.  A local zero timestamp
    // is initialized by the client constructor.
    ExtendedTimestampQueue::Observer mTimestampObserver;
    ExtendedTimestamp mTimestamp; // initialized by constructor
};

// ----------------------------------------------------------------------------
@@ -476,6 +512,7 @@ public:
protected:
    size_t      mAvailToClient; // estimated frames available to client prior to releaseBuffer()
    int32_t     mFlush;         // our copy of cblk->u.mStreaming.mFlush, for streaming output only
    int64_t     mReleased;      // our copy of cblk->mServer, at 64 bit resolution
};

// Proxy used by AudioFlinger for servicing AudioTrack
@@ -520,7 +557,7 @@ public:
    virtual uint32_t    getUnderrunFrames() const { return mCblk->u.mStreaming.mUnderrunFrames; }

    // Return the total number of frames that AudioFlinger has obtained and released
    virtual size_t      framesReleased() const { return mCblk->mServer; }
    virtual size_t      framesReleased() const { return mReleased; }

    // Return the playback speed and pitch read atomically. Not multi-thread safe on server side.
    AudioPlaybackRate getPlaybackRate();
@@ -574,9 +611,20 @@ class AudioRecordServerProxy : public ServerProxy {
public:
    AudioRecordServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
            size_t frameSize, bool clientInServer)
        : ServerProxy(cblk, buffers, frameCount, frameSize, false /*isOut*/, clientInServer) { }
        : ServerProxy(cblk, buffers, frameCount, frameSize, false /*isOut*/, clientInServer)
        , mTimestampMutator(&cblk->mExtendedTimestampQueue) { }

    // Return the total number of frames that AudioFlinger has obtained and released
    virtual int64_t     framesReleased() const { return mReleased; }

    // Expose timestamp to client proxy. Should only be called by a single thread.
    virtual void        setExtendedTimestamp(const ExtendedTimestamp &timestamp) {
                            // Single-writer push into the single-state queue;
                            // the client-side observer polls this value.
                            mTimestampMutator.push(timestamp);
                        }
protected:
    virtual ~AudioRecordServerProxy() { }

    ExtendedTimestampQueue::Mutator       mTimestampMutator;
};

// ----------------------------------------------------------------------------
+41 −1
Original line number Diff line number Diff line
@@ -284,6 +284,8 @@ status_t AudioRecord::set(
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;
    mFramesRead = 0;
    mFramesReadServerOffset = 0;

    return NO_ERROR;
}
@@ -299,6 +301,12 @@ status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession)
        return NO_ERROR;
    }

    // discard data in buffer
    const uint32_t framesFlushed = mProxy->flush();
    mFramesReadServerOffset -= mFramesRead + framesFlushed;
    mFramesRead = 0;
    mProxy->clearTimestamp();  // timestamp is invalid until next server push

    // reset current position as seen by client to 0
    mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
    // force refresh of remaining frames by processAudioBuffer() as last
@@ -449,6 +457,27 @@ uint32_t AudioRecord::getInputFramesLost() const
    return AudioSystem::getInputFramesLost(getInputPrivate());
}

// Returns the extended record timestamp, decorated with the client-side
// read position and adjusted for any server restore offset.
// Returns BAD_VALUE if timestamp is NULL, otherwise the proxy status.
status_t AudioRecord::getTimestamp(ExtendedTimestamp *timestamp)
{
    if (timestamp == nullptr) {
        return BAD_VALUE;
    }
    AutoMutex lock(mLock);
    const status_t status = mProxy->getTimestamp(timestamp);
    if (status != OK) {
        return status;
    }
    // Client position comes from our own read accounting; a time of 0 means
    // "position valid, time not provided" per the ExtendedTimestamp contract.
    timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesRead;
    timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
    // server side frame offset in case AudioRecord has been restored.
    for (int stage = ExtendedTimestamp::LOCATION_SERVER;
            stage < ExtendedTimestamp::LOCATION_MAX; ++stage) {
        if (timestamp->mTimeNs[stage] >= 0) {
            timestamp->mPosition[stage] += mFramesReadServerOffset;
        }
    }
    return status;
}

// ---- Explicit Routing ---------------------------------------------------
status_t AudioRecord::setInputDevice(audio_port_handle_t deviceId) {
    AutoMutex lock(mLock);
@@ -837,7 +866,10 @@ ssize_t AudioRecord::read(void* buffer, size_t userSize, bool blocking)

        releaseBuffer(&audioBuffer);
    }

    if (read > 0) {
        mFramesRead += read / mFrameSize;
        // mFramesReadTime = systemTime(SYSTEM_TIME_MONOTONIC); // not provided at this time.
    }
    return read;
}

@@ -988,6 +1020,7 @@ nsecs_t AudioRecord::processAudioBuffer()
        requested = &timeout;
    }

    size_t readFrames = 0;
    while (mRemainingFrames > 0) {

        Buffer audioBuffer;
@@ -1049,6 +1082,7 @@ nsecs_t AudioRecord::processAudioBuffer()
        }

        releaseBuffer(&audioBuffer);
        readFrames += releasedFrames;

        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
        // if callback doesn't like to accept the full chunk
@@ -1072,6 +1106,11 @@ nsecs_t AudioRecord::processAudioBuffer()
#endif

    }
    if (readFrames > 0) {
        AutoMutex lock(mLock);
        mFramesRead += readFrames;
        // mFramesReadTime = systemTime(SYSTEM_TIME_MONOTONIC); // not provided at this time.
    }
    mRemainingFrames = notificationFrames;
    mRetryOnPartialBuffer = true;

@@ -1096,6 +1135,7 @@ status_t AudioRecord::restoreRecord_l(const char *from)
            // FIXME this fails if we have a new AudioFlinger instance
            result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0);
        }
        mFramesReadServerOffset = mFramesRead; // server resets to zero so we need an offset.
    }
    if (result != NO_ERROR) {
        ALOGW("restoreRecord_l() failed status %d", result);
+4 −1
Original line number Diff line number Diff line
@@ -597,7 +597,7 @@ void StaticAudioTrackClientProxy::getBufferPositionAndLoopCount(
ServerProxy::ServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
        size_t frameSize, bool isOut, bool clientInServer)
    : Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer),
      mAvailToClient(0), mFlush(0)
      mAvailToClient(0), mFlush(0), mReleased(0)
{
}

@@ -733,6 +733,7 @@ void ServerProxy::releaseBuffer(Buffer* buffer)
    }

    cblk->mServer += stepCount;
    mReleased += stepCount;

    size_t half = mFrameCount / 2;
    if (half == 0) {
@@ -1033,6 +1034,8 @@ void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer)
    mFramesReadySafe = clampToSize(mFramesReady);

    cblk->mServer += stepCount;
    mReleased += stepCount;

    // This may overflow, but client is not supposed to rely on it
    StaticAudioTrackPosLoop posLoop;
    posLoop.mBufferPosition = mState.mPosition;
Loading