Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 97f78f36 authored by Android Build Coastguard Worker's avatar Android Build Coastguard Worker
Browse files

Snap for 11704135 from f37f18e3 to 24Q3-release

Change-Id: Ibe9c950b2d4a1019b03b7be8a8609c25e515cc79
parents 51ae084a f37f18e3
Loading
Loading
Loading
Loading
+2 −15
Original line number Diff line number Diff line
@@ -130,11 +130,7 @@ status_t AudioRecord::getMetrics(mediametrics::Item * &item)
}

AudioRecord::AudioRecord(const AttributionSourceState &client)
    : mActive(false), mStatus(NO_INIT), mClientAttributionSource(client),
      mSessionId(AUDIO_SESSION_ALLOCATE), mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT), mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE), mSelectedMicDirection(MIC_DIRECTION_UNSPECIFIED),
      mSelectedMicFieldDimension(MIC_FIELD_DIMENSION_DEFAULT)
    : mClientAttributionSource(client)
{
}

@@ -154,13 +150,7 @@ AudioRecord::AudioRecord(
        audio_port_handle_t selectedDeviceId,
        audio_microphone_direction_t selectedMicDirection,
        float microphoneFieldDimension)
    : mActive(false),
      mStatus(NO_INIT),
      mClientAttributionSource(client),
      mSessionId(AUDIO_SESSION_ALLOCATE),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mProxy(nullptr)
    : mClientAttributionSource(client)
{
    uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mClientAttributionSource.uid));
    pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
@@ -199,9 +189,6 @@ AudioRecord::~AudioRecord()
}

void AudioRecord::stopAndJoinCallbacks() {
    // Prevent nullptr crash if it did not open properly.
    if (mStatus != NO_ERROR) return;

    // Make sure that callback function exits in the case where
    // it is looping on buffer empty condition in obtainBuffer().
    // Otherwise the callback thread will never exit.
+16 −13
Original line number Diff line number Diff line
@@ -681,7 +681,7 @@ private:

    // Current client state:  false = stopped, true = active.  Protected by mLock.  If more states
    // are added, consider changing this to enum State { ... } mState as in AudioTrack.
    bool                    mActive;
    bool mActive = false;

    // for client callback handler

@@ -708,7 +708,7 @@ private:
    Modulo<uint32_t>        mNewPosition;           // in frames
    uint32_t                mUpdatePeriod;          // in frames, zero means no EVENT_NEW_POS

    status_t                mStatus;
    status_t mStatus = NO_INIT;

    android::content::AttributionSourceState mClientAttributionSource; // Owner's attribution source

@@ -736,8 +736,8 @@ private:
                                                    // held to read or write those bits reliably.
    audio_input_flags_t     mOrigFlags;             // as specified in constructor or set(), const

    audio_session_t         mSessionId;
    audio_port_handle_t     mPortId;                    // Id from Audio Policy Manager
    audio_session_t mSessionId = AUDIO_SESSION_ALLOCATE;
    audio_port_handle_t mPortId = AUDIO_PORT_HANDLE_NONE;

    /**
     * mLogSessionId is a string identifying this AudioRecord for the metrics service.
@@ -756,9 +756,9 @@ private:
    sp<IMemory>             mBufferMemory;
    audio_io_handle_t       mInput = AUDIO_IO_HANDLE_NONE; // from AudioSystem::getInputforAttr()

    int                     mPreviousPriority;  // before start()
    SchedPolicy             mPreviousSchedulingGroup;
    bool                    mAwaitBoost;    // thread should wait for priority boost before running
    int mPreviousPriority = ANDROID_PRIORITY_NORMAL;  // before start()
    SchedPolicy mPreviousSchedulingGroup = SP_DEFAULT;
    bool mAwaitBoost = false;  // thread should wait for priority boost before running

    // The proxy should only be referenced while a lock is held because the proxy isn't
    // multi-thread safe.
@@ -799,14 +799,17 @@ private:

    // For Device Selection API
    //  a value of AUDIO_PORT_HANDLE_NONE indicated default (AudioPolicyManager) routing.
    audio_port_handle_t     mSelectedDeviceId; // Device requested by the application.
    audio_port_handle_t     mRoutedDeviceId;   // Device actually selected by audio policy manager:
                                              // May not match the app selection depending on other
                                              // activity and connected devices

    // Device requested by the application.
    audio_port_handle_t     mSelectedDeviceId = AUDIO_PORT_HANDLE_NONE;
    // Device actually selected by AudioPolicyManager: This may not match the app
    // selection depending on other activity and connected devices
    audio_port_handle_t     mRoutedDeviceId = AUDIO_PORT_HANDLE_NONE;

    wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;

    audio_microphone_direction_t mSelectedMicDirection;
    float mSelectedMicFieldDimension;
    audio_microphone_direction_t mSelectedMicDirection = MIC_DIRECTION_UNSPECIFIED;
    float mSelectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT;

    int32_t                    mMaxSharedAudioHistoryMs = 0;
    std::string                mSharedAudioPackageName = {};
+19 −0
Original line number Diff line number Diff line
@@ -28,6 +28,25 @@

using namespace android;

// Test that the basic constructor returns an object that doesn't crash
// on stop() or destruction.

TEST(AudioRecordTestBasic, EmptyAudioRecord) {
    // Build a minimal attribution source identifying this test process as the client.
    AttributionSourceState attributionSource;
    attributionSource.packageName = "AudioRecordTest";
    attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
    attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
    attributionSource.token = sp<BBinder>::make();
    // Attribution-only constructor; set() is intentionally never called, so the
    // record object stays in its default (uninitialized) state.
    const auto ar = sp<AudioRecord>::make(attributionSource);

    // test key commands on an unset AudioRecord.
    EXPECT_EQ(NO_INIT, ar->initCheck());  // mStatus defaults to NO_INIT
    EXPECT_EQ(true, ar->stopped());       // mActive defaults to false

    // just don't crash.
    ar->stop();
}

class AudioRecordTest : public ::testing::Test {
  public:
    void SetUp() override {
+8 −4
Original line number Diff line number Diff line
@@ -415,8 +415,10 @@ status_t StreamHalAidl::exit() {

void StreamHalAidl::onAsyncTransferReady() {
    if (auto state = getState(); state == StreamDescriptor::State::TRANSFERRING) {
        // Retrieve the current state together with position counters.
        updateCountersIfNeeded();
        // Retrieve the current state together with position counters unconditionally
        // to ensure that the state on our side gets updated.
        sendCommand(makeHalCommand<HalCommand::Tag::getStatus>(),
                nullptr, true /*safeFromNonWorkerThread */);
    } else {
        ALOGW("%s: unexpected onTransferReady in the state %s", __func__, toString(state).c_str());
    }
@@ -424,8 +426,10 @@ void StreamHalAidl::onAsyncTransferReady() {

void StreamHalAidl::onAsyncDrainReady() {
    if (auto state = getState(); state == StreamDescriptor::State::DRAINING) {
        // Retrieve the current state together with position counters.
        updateCountersIfNeeded();
        // Retrieve the current state together with position counters unconditionally
        // to ensure that the state on our side gets updated.
        sendCommand(makeHalCommand<HalCommand::Tag::getStatus>(),
                nullptr, true /*safeFromNonWorkerThread */);
    } else {
        ALOGW("%s: unexpected onDrainReady in the state %s", __func__, toString(state).c_str());
    }
+46 −29
Original line number Diff line number Diff line
@@ -353,46 +353,63 @@ IEffect::Status ReverbContext::process(float* in, float* out, int samples) {
        return status;
    }

    std::vector<float> inFrames(samples);
    std::vector<float> outFrames(frameCount * FCC_2);
    std::vector<float> inputSamples;
    std::vector<float> outputSamples(frameCount * FCC_2);

    if (isPreset() && mNextPreset != mPreset) {
        loadPreset();
    }

    if (isAuxiliary()) {
        inFrames.assign(in, in + samples);
        inputSamples.resize(samples);
        inputSamples.assign(in, in + samples);
    } else {
        // mono input is duplicated
        // Resizing to stereo is required to duplicate mono input
        inputSamples.resize(frameCount * FCC_2);
        if (channels >= FCC_2) {
            for (int i = 0; i < frameCount; i++) {
                inFrames[FCC_2 * i] = in[channels * i] * kSendLevel;
                inFrames[FCC_2 * i + 1] = in[channels * i + 1] * kSendLevel;
                inputSamples[FCC_2 * i] = in[channels * i] * kSendLevel;
                inputSamples[FCC_2 * i + 1] = in[channels * i + 1] * kSendLevel;
            }
        } else {
            for (int i = 0; i < frameCount; i++) {
                inFrames[FCC_2 * i] = inFrames[FCC_2 * i + 1] = in[i] * kSendLevel;
                inputSamples[FCC_2 * i] = inputSamples[FCC_2 * i + 1] = in[i] * kSendLevel;
            }
        }
    }

    if (isPreset() && mPreset == PresetReverb::Presets::NONE) {
        std::fill(outFrames.begin(), outFrames.end(), 0);  // always stereo here
        std::fill(outputSamples.begin(), outputSamples.end(), 0);  // always stereo here
    } else {
        if (!mEnabled && mSamplesToExitCount > 0) {
            std::fill(outFrames.begin(), outFrames.end(), 0);
            std::fill(outputSamples.begin(), outputSamples.end(), 0);
        }
        int inputBufferIndex = 0;
        int outputBufferIndex = 0;

        // LVREV library supports max of int16_t frames at a time
        constexpr int kMaxBlockFrames = std::numeric_limits<int16_t>::max();
        const auto inputFrameSize = getInputFrameSize();
        const auto outputFrameSize = getOutputFrameSize();

        /* Process the samples, producing a stereo output */
        for (int fc = frameCount; fc > 0;) {
            int processFrames = std::min(fc, kMaxBlockFrames);
            LVREV_ReturnStatus_en lvrevStatus =
                    LVREV_Process(mInstance,                            /* Instance handle */
                              inFrames.data(),  /* Input buffer */
                              outFrames.data(), /* Output buffer */
                              frameCount);      /* Number of samples to read */
                                  inputSamples.data() + inputBufferIndex,   /* Input buffer */
                                  outputSamples.data() + outputBufferIndex, /* Output buffer */
                                  processFrames); /* Number of samples to process */
            if (lvrevStatus != LVREV_SUCCESS) {
                LOG(ERROR) << __func__ << " LVREV_Process error: " << lvrevStatus;
                return {EX_UNSUPPORTED_OPERATION, 0, 0};
            }

            fc -= processFrames;

            inputBufferIndex += processFrames * inputFrameSize / sizeof(float);
            outputBufferIndex += processFrames * outputFrameSize / sizeof(float);
        }
    }
    // Convert to 16 bits
    if (isAuxiliary()) {
@@ -401,14 +418,14 @@ IEffect::Status ReverbContext::process(float* in, float* out, int samples) {
        if (channels >= FCC_2) {
            for (int i = 0; i < frameCount; i++) {
                // Mix with dry input
                outFrames[FCC_2 * i] += in[channels * i];
                outFrames[FCC_2 * i + 1] += in[channels * i + 1];
                outputSamples[FCC_2 * i] += in[channels * i];
                outputSamples[FCC_2 * i + 1] += in[channels * i + 1];
            }
        } else {
            for (int i = 0; i < frameCount; i++) {
                // Mix with dry input
                outFrames[FCC_2 * i] += in[i];
                outFrames[FCC_2 * i + 1] += in[i];
                outputSamples[FCC_2 * i] += in[i];
                outputSamples[FCC_2 * i + 1] += in[i];
            }
        }

@@ -420,8 +437,8 @@ IEffect::Status ReverbContext::process(float* in, float* out, int samples) {
            float incr = (mVolume.right - vr) / frameCount;

            for (int i = 0; i < frameCount; i++) {
                outFrames[FCC_2 * i] *= vl;
                outFrames[FCC_2 * i + 1] *= vr;
                outputSamples[FCC_2 * i] *= vl;
                outputSamples[FCC_2 * i + 1] *= vr;

                vl += incl;
                vr += incr;
@@ -430,8 +447,8 @@ IEffect::Status ReverbContext::process(float* in, float* out, int samples) {
        } else if (volumeMode != VOLUME_OFF) {
            if (mVolume.left != kUnitVolume || mVolume.right != kUnitVolume) {
                for (int i = 0; i < frameCount; i++) {
                    outFrames[FCC_2 * i] *= mVolume.left;
                    outFrames[FCC_2 * i + 1] *= mVolume.right;
                    outputSamples[FCC_2 * i] *= mVolume.left;
                    outputSamples[FCC_2 * i + 1] *= mVolume.right;
                }
            }
            mPrevVolume = mVolume;
@@ -441,8 +458,8 @@ IEffect::Status ReverbContext::process(float* in, float* out, int samples) {

    if (outChannels > 2) {
        for (int i = 0; i < frameCount; i++) {
            out[outChannels * i] = outFrames[FCC_2 * i];
            out[outChannels * i + 1] = outFrames[FCC_2 * i + 1];
            out[outChannels * i] = outputSamples[FCC_2 * i];
            out[outChannels * i + 1] = outputSamples[FCC_2 * i + 1];
        }
        if (!isAuxiliary()) {
            for (int i = 0; i < frameCount; i++) {
@@ -454,10 +471,10 @@ IEffect::Status ReverbContext::process(float* in, float* out, int samples) {
        }
    } else {
        if (outChannels == FCC_1) {
            From2iToMono_Float(outFrames.data(), out, frameCount);
            From2iToMono_Float(outputSamples.data(), out, frameCount);
        } else {
            for (int i = 0; i < frameCount * FCC_2; i++) {
                out[i] = outFrames[i];
                out[i] = outputSamples[i];
            }
        }
    }
Loading