
Commit 9b09e533 authored by Paul McLean

JNI plumbing for native audio routing API

Bug: 23899814
Change-Id: I3a831bb661fbdfe1981ae3482fcc8773c7df22b6
parent 3070d75b
+1 −0
@@ -20021,6 +20021,7 @@ package android.media {
  public abstract interface AudioRouting {
    method public abstract void addOnRoutingListener(android.media.AudioRouting.OnRoutingChangedListener, android.os.Handler);
    method public abstract android.media.AudioDeviceInfo getPreferredDevice();
    method public abstract android.media.AudioDeviceInfo getRoutedDevice();
    method public abstract void removeOnRoutingListener(android.media.AudioRouting.OnRoutingChangedListener);
    method public abstract boolean setPreferredDevice(android.media.AudioDeviceInfo);
  }
+1 −0
@@ -21529,6 +21529,7 @@ package android.media {
  public abstract interface AudioRouting {
    method public abstract void addOnRoutingListener(android.media.AudioRouting.OnRoutingChangedListener, android.os.Handler);
    method public abstract android.media.AudioDeviceInfo getPreferredDevice();
    method public abstract android.media.AudioDeviceInfo getRoutedDevice();
    method public abstract void removeOnRoutingListener(android.media.AudioRouting.OnRoutingChangedListener);
    method public abstract boolean setPreferredDevice(android.media.AudioDeviceInfo);
  }
+1 −0
@@ -20030,6 +20030,7 @@ package android.media {
  public abstract interface AudioRouting {
    method public abstract void addOnRoutingListener(android.media.AudioRouting.OnRoutingChangedListener, android.os.Handler);
    method public abstract android.media.AudioDeviceInfo getPreferredDevice();
    method public abstract android.media.AudioDeviceInfo getRoutedDevice();
    method public abstract void removeOnRoutingListener(android.media.AudioRouting.OnRoutingChangedListener);
    method public abstract boolean setPreferredDevice(android.media.AudioDeviceInfo);
  }
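
The three hunks above land the same one-line addition to the android.media.AudioRouting interface in each of the checked-in API files. As a minimal sketch of how client code could exercise that surface, assuming AudioRecord implements AudioRouting and that the single callback on OnRoutingChangedListener is named onRoutingChanged (neither assumption is shown in this diff):

```java
// Minimal sketch of the android.media.AudioRouting surface listed above.
// Assumptions: AudioRecord implements AudioRouting, and OnRoutingChangedListener's
// single callback is onRoutingChanged(AudioRouting) -- neither appears in this diff.
import android.media.AudioDeviceInfo;
import android.media.AudioRecord;
import android.media.AudioRouting;
import android.os.Handler;

final class RoutingProbe {
    private final AudioRouting.OnRoutingChangedListener listener =
            new AudioRouting.OnRoutingChangedListener() {
                @Override
                public void onRoutingChanged(AudioRouting router) {
                    // Fires when audio policy re-routes this stream.
                    AudioDeviceInfo nowRouted = router.getRoutedDevice();    // may be null before routing settles
                    AudioDeviceInfo requested = router.getPreferredDevice(); // null unless setPreferredDevice() was called
                }
            };

    void attach(AudioRecord record, Handler handler) {
        record.addOnRoutingListener(listener, handler);
    }

    void detach(AudioRecord record) {
        record.removeOnRoutingListener(listener);
    }
}
```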
+132 −98
@@ -181,35 +181,68 @@ static sp<AudioRecord> setAudioRecord(JNIEnv* env, jobject thiz, const sp<AudioR
static jint
android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        jobject jaa, jintArray jSampleRate, jint channelMask, jint channelIndexMask,
        jint audioFormat, jint buffSizeInBytes, jintArray jSession, jstring opPackageName)
        jint audioFormat, jint buffSizeInBytes, jintArray jSession, jstring opPackageName,
        jlong nativeRecordInJavaObj)
{
    jint elements[1];
    env->GetIntArrayRegion(jSampleRate, 0, 1, elements);
    int sampleRateInHertz = elements[0];

    //ALOGV(">> Entering android_media_AudioRecord_setup");
    //ALOGV("sampleRate=%d, audioFormat=%d, channel mask=%x, buffSizeInBytes=%d",
    //     sampleRateInHertz, audioFormat, channelMask, buffSizeInBytes);
    //ALOGV("sampleRate=%d, audioFormat=%d, channel mask=%x, buffSizeInBytes=%d "
    //     "nativeRecordInJavaObj=0x%llX",
    //     sampleRateInHertz, audioFormat, channelMask, buffSizeInBytes, nativeRecordInJavaObj);
    audio_channel_mask_t localChanMask = inChannelMaskToNative(channelMask);

    if (jSession == NULL) {
        ALOGE("Error creating AudioRecord: invalid session ID pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }

    jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioRecord: Error retrieving session id pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }
    int sessionId = nSession[0];
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    audio_attributes_t *paa = NULL;
    sp<AudioRecord> lpRecorder = 0;
    audiorecord_callback_cookie *lpCallbackData = NULL;

    jclass clazz = env->GetObjectClass(thiz);
    if (clazz == NULL) {
        ALOGE("Can't find %s when setting up callback.", kClassPathName);
        return (jint) AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
    }

    // if we pass in an existing *Native* AudioRecord, we don't need to create/initialize one.
    if (nativeRecordInJavaObj == 0) {
        if (jaa == 0) {
            ALOGE("Error creating AudioRecord: invalid audio attributes");
            return (jint) AUDIO_JAVA_ERROR;
        }

        if (jSampleRate == 0) {
            ALOGE("Error creating AudioRecord: invalid sample rates");
            return (jint) AUDIO_JAVA_ERROR;
        }
        jint elements[1];
        env->GetIntArrayRegion(jSampleRate, 0, 1, elements);
        int sampleRateInHertz = elements[0];

        // channel index mask takes priority over channel position masks.
        if (channelIndexMask) {
            // Java channel index masks need the representation bits set.
        channelMask = audio_channel_mask_from_representation_and_bits(
            localChanMask = audio_channel_mask_from_representation_and_bits(
                    AUDIO_CHANNEL_REPRESENTATION_INDEX,
                    channelIndexMask);
        }
        // Java channel position masks map directly to the native definition

    if (!audio_is_input_channel(channelMask)) {
        ALOGE("Error creating AudioRecord: channel mask %#x is not valid.", channelMask);
        if (!audio_is_input_channel(localChanMask)) {
            ALOGE("Error creating AudioRecord: channel mask %#x is not valid.", localChanMask);
            return (jint) AUDIORECORD_ERROR_SETUP_INVALIDCHANNELMASK;
        }
    uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
        uint32_t channelCount = audio_channel_count_from_in_mask(localChanMask);

        // compare the format against the Java constants
        audio_format_t format = audioFormatToNative(audioFormat);
@@ -227,32 +260,11 @@ android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        size_t frameSize = channelCount * bytesPerSample;
        size_t frameCount = buffSizeInBytes / frameSize;

    jclass clazz = env->GetObjectClass(thiz);
    if (clazz == NULL) {
        ALOGE("Can't find %s when setting up callback.", kClassPathName);
        return (jint) AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
    }

    if (jSession == NULL) {
        ALOGE("Error creating AudioRecord: invalid session ID pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }

    jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioRecord: Error retrieving session id pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }
    int sessionId = nSession[0];
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

        ScopedUtfChars opPackageNameStr(env, opPackageName);

        // create an uninitialized AudioRecord object
    sp<AudioRecord> lpRecorder = new AudioRecord(String16(opPackageNameStr.c_str()));
        lpRecorder = new AudioRecord(String16(opPackageNameStr.c_str()));

    audio_attributes_t *paa = NULL;
        // read the AudioAttributes values
        paa = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
        const jstring jtags =
@@ -271,7 +283,7 @@ android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        }
        // create the callback information:
        // this data will be passed with every AudioRecord callback
    audiorecord_callback_cookie *lpCallbackData = new audiorecord_callback_cookie;
        lpCallbackData = new audiorecord_callback_cookie;
        lpCallbackData->audioRecord_class = (jclass)env->NewGlobalRef(clazz);
        // we use a weak reference so the AudioRecord object can be garbage collected.
        lpCallbackData->audioRecord_ref = env->NewGlobalRef(weak_this);
@@ -280,7 +292,7 @@ android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        const status_t status = lpRecorder->set(paa->source,
            sampleRateInHertz,
            format,        // word length, PCM
        channelMask,
            localChanMask,
            frameCount,
            recorderCallback,// callback_t
            lpCallbackData,// void* user
@@ -297,6 +309,28 @@ android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
                    status);
            goto native_init_failure;
        }
    } else { // end if (nativeRecordInJavaObj == 0)
        lpRecorder = (AudioRecord*)nativeRecordInJavaObj;
        // TODO: We need to find out which members of the Java AudioRecord might need to be
        // initialized from the Native AudioRecord
        // these are directly returned from getters:
        //  mSampleRate
        //  mRecordSource
        //  mAudioFormat
        //  mChannelMask
        //  mChannelCount
        //  mState (?)
        //  mRecordingState (?)
        //  mPreferredDevice

        // create the callback information:
        // this data will be passed with every AudioRecord callback
        lpCallbackData = new audiorecord_callback_cookie;
        lpCallbackData->audioRecord_class = (jclass)env->NewGlobalRef(clazz);
        // we use a weak reference so the AudioRecord object can be garbage collected.
        lpCallbackData->audioRecord_ref = env->NewGlobalRef(weak_this);
        lpCallbackData->busy = false;
    }

    nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
@@ -726,7 +760,7 @@ static const JNINativeMethod gMethods[] = {
    // name,               signature,  funcPtr
    {"native_start",         "(II)I",    (void *)android_media_AudioRecord_start},
    {"native_stop",          "()V",    (void *)android_media_AudioRecord_stop},
    {"native_setup",         "(Ljava/lang/Object;Ljava/lang/Object;[IIIII[ILjava/lang/String;)I",
    {"native_setup",         "(Ljava/lang/Object;Ljava/lang/Object;[IIIII[ILjava/lang/String;J)I",
                                      (void *)android_media_AudioRecord_setup},
    {"native_finalize",      "()V",    (void *)android_media_AudioRecord_finalize},
    {"native_release",       "()V",    (void *)android_media_AudioRecord_release},
+168 −128
@@ -213,23 +213,58 @@ static inline audio_channel_mask_t nativeChannelMaskFromJavaChannelMasks(

// ----------------------------------------------------------------------------
static jint
android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        jobject jaa,
android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this, jobject jaa,
        jintArray jSampleRate, jint channelPositionMask, jint channelIndexMask,
        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession) {
        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession,
        jlong nativeAudioTrack) {

    jint elements[1];
    env->GetIntArrayRegion(jSampleRate, 0, 1, elements);
    int sampleRateInHertz = elements[0];
    ALOGV("sampleRates=%p, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d"
        "nativeAudioTrack=0x%llX",
        jSampleRate, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes,
        nativeAudioTrack);

    ALOGV("sampleRate=%d, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d",
        sampleRateInHertz, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes);
    sp<AudioTrack> lpTrack = 0;

    if (jSession == NULL) {
        ALOGE("Error creating AudioTrack: invalid session ID pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }

    jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }
    int sessionId = nSession[0];
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    AudioTrackJniStorage* lpJniStorage = NULL;

    audio_attributes_t *paa = NULL;

    jclass clazz = env->GetObjectClass(thiz);
    if (clazz == NULL) {
        ALOGE("Can't find %s when setting up callback.", kClassPathName);
        return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
    }

    // if we pass in an existing *Native* AudioTrack, we don't need to create/initialize one.
    if (nativeAudioTrack == 0) {
        if (jaa == 0) {
            ALOGE("Error creating AudioTrack: invalid audio attributes");
            return (jint) AUDIO_JAVA_ERROR;
        }

        if (jSampleRate == 0) {
            ALOGE("Error creating AudioTrack: invalid sample rates");
            return (jint) AUDIO_JAVA_ERROR;
        }

        int* sampleRates = env->GetIntArrayElements(jSampleRate, NULL);
        int sampleRateInHertz = sampleRates[0];
        env->ReleaseIntArrayElements(jSampleRate, sampleRates, JNI_ABORT);

        // Invalid channel representations are caught by !audio_is_output_channel() below.
        audio_channel_mask_t nativeChannelMask = nativeChannelMaskFromJavaChannelMasks(
                channelPositionMask, channelIndexMask);
@@ -250,37 +285,16 @@ android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,

        // compute the frame count
        size_t frameCount;
    if (audio_has_proportional_frames(format)) {
        if (audio_is_linear_pcm(format)) {
            const size_t bytesPerSample = audio_bytes_per_sample(format);
            frameCount = buffSizeInBytes / (channelCount * bytesPerSample);
        } else {
            frameCount = buffSizeInBytes;
        }

    jclass clazz = env->GetObjectClass(thiz);
    if (clazz == NULL) {
        ALOGE("Can't find %s when setting up callback.", kClassPathName);
        return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
    }

    if (jSession == NULL) {
        ALOGE("Error creating AudioTrack: invalid session ID pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }

    jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }
    int sessionId = nSession[0];
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

        // create the native AudioTrack object
    sp<AudioTrack> lpTrack = new AudioTrack();
        lpTrack = new AudioTrack();

    audio_attributes_t *paa = NULL;
        // read the AudioAttributes values
        paa = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
        const jstring jtags =
@@ -299,7 +313,7 @@ android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,

        // initialize the callback information:
        // this data will be passed with every AudioTrack callback
    AudioTrackJniStorage* lpJniStorage = new AudioTrackJniStorage();
        lpJniStorage = new AudioTrackJniStorage();
        lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
        // we use a weak reference so the AudioTrack object can be garbage collected.
        lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
@@ -363,6 +377,31 @@ android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,
            ALOGE("Error %d initializing AudioTrack", status);
            goto native_init_failure;
        }
    } else {  // end if (nativeAudioTrack == 0)
        lpTrack = (AudioTrack*)nativeAudioTrack;
        // TODO: We need to find out which members of the Java AudioTrack might
        // need to be initialized from the Native AudioTrack
        // these are directly returned from getters:
        //  mSampleRate
        //  mAudioFormat
        //  mStreamType
        //  mChannelConfiguration
        //  mChannelCount
        //  mState (?)
        //  mPlayState (?)
        // these may be used internally (Java AudioTrack.audioParamCheck():
        //  mChannelMask
        //  mChannelIndexMask
        //  mDataLoadMode

        // initialize the callback information:
        // this data will be passed with every AudioTrack callback
        lpJniStorage = new AudioTrackJniStorage();
        lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
        // we use a weak reference so the AudioTrack object can be garbage collected.
        lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
        lpJniStorage->mCallbackData.busy = false;
    }

    nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
@@ -394,9 +433,11 @@ android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,
    // since we had audio attributes, the stream type was derived from them during the
    // creation of the native AudioTrack: push the same value to the Java object
    env->SetIntField(thiz, javaAudioTrackFields.fieldStreamType, (jint) lpTrack->streamType());
    if (paa != NULL) {
        // audio attributes were copied in AudioTrack creation
        free(paa);
        paa = NULL;
    }


    return (jint) AUDIO_JAVA_SUCCESS;
@@ -418,7 +459,6 @@ native_init_failure:
    return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
}


// ----------------------------------------------------------------------------
static void
android_media_AudioTrack_start(JNIEnv *env, jobject thiz)
@@ -1123,7 +1163,7 @@ static const JNINativeMethod gMethods[] = {
    {"native_stop",          "()V",      (void *)android_media_AudioTrack_stop},
    {"native_pause",         "()V",      (void *)android_media_AudioTrack_pause},
    {"native_flush",         "()V",      (void *)android_media_AudioTrack_flush},
    {"native_setup",     "(Ljava/lang/Object;Ljava/lang/Object;[IIIIII[I)I",
    {"native_setup",     "(Ljava/lang/Object;Ljava/lang/Object;[IIIIII[IJ)I",
                                         (void *)android_media_AudioTrack_setup},
    {"native_finalize",      "()V",      (void *)android_media_AudioTrack_finalize},
    {"native_release",       "()V",      (void *)android_media_AudioTrack_release},