
Commit 559ec615 authored by Xin Li, committed by Gerrit Code Review

Merge "Merge Android 12L"

parents ffddb1da 94ce1fc5
+1 −0

# Bug component: 41727
etalvala@google.com
arakesh@google.com
+6 −3
@@ -37,8 +37,11 @@ interface ICameraServiceProxy
    oneway void notifyCameraState(in CameraSessionStats cameraSessionStats);

    /**
-     * Reports whether the top activity needs a rotate and crop override.
+     * Returns the necessary rotate and crop override for the top activity which
+     * will be one of ({@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_NONE},
+     * {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_90},
+     * {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_180},
+     * {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_270}).
     */
-    boolean isRotateAndCropOverrideNeeded(String packageName, int sensorOrientation,
-            int lensFacing);
+    int getRotateAndCropOverride(String packageName, int lensFacing, int userId);
}
+41 −4
@@ -3600,7 +3600,8 @@ typedef enum acamera_metadata_tag {
     * YUV_420_888    | all output sizes available for JPEG, up to the maximum video size | LIMITED        |
     * IMPLEMENTATION_DEFINED | same as YUV_420_888                  | Any            |</p>
     * <p>For applications targeting SDK version 31 or newer, if the mobile device declares to be
-     * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CDOES/MEDIA_PERFORMANCE_CLASS.html">media performance class</a> S,
+     * media performance class 12 or higher by setting
+     * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CDOES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_PERFORMANCE_CLASS</a> to be 31 or larger,
     * the primary camera devices (first rear/front camera in the camera ID list) will not
     * support JPEG sizes smaller than 1080p. If the application configures a JPEG stream
     * smaller than 1080p, the camera device will round up the JPEG image size to at least
@@ -3618,9 +3619,11 @@ typedef enum acamera_metadata_tag {
     * YUV_420_888    | all output sizes available for FULL hardware level, up to the maximum video size | LIMITED        |
     * IMPLEMENTATION_DEFINED | same as YUV_420_888                  | Any            |</p>
     * <p>For applications targeting SDK version 31 or newer, if the mobile device doesn't declare
-     * to be media performance class S, or if the camera device isn't a primary rear/front
-     * camera, the minimum required output stream configurations are the same as for applications
-     * targeting SDK version older than 31.</p>
+     * to be media performance class 12 or better by setting
+     * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CDOES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_PERFORMANCE_CLASS</a> to be 31 or larger,
+     * or if the camera device isn't a primary rear/front camera, the minimum required output
+     * stream configurations are the same as for applications targeting SDK version older than
+     * 31.</p>
     * <p>Refer to ACAMERA_REQUEST_AVAILABLE_CAPABILITIES for additional
     * mandatory stream configurations on a per-capability basis.</p>
     * <p>Exception on 176x144 (QCIF) resolution: camera devices usually have a fixed capability for
@@ -4578,6 +4581,25 @@ typedef enum acamera_metadata_tag {
     *
     * <p>Also defines the direction of rolling shutter readout, which is from top to bottom in
     * the sensor's coordinate system.</p>
     * <p>Starting with Android API level 32, camera clients that query the orientation via
     * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#get">CameraCharacteristics#get</a> on foldable devices which
     * include logical cameras can receive a value that can dynamically change depending on the
     * device/fold state.
     * Clients are advised to not cache or store the orientation value of such logical sensors.
     * In case repeated queries to CameraCharacteristics are not preferred, then clients can
     * also access the entire mapping from device state to sensor orientation in
     * <a href="https://developer.android.com/reference/android/hardware/camera2/params/DeviceStateSensorOrientationMap.html">DeviceStateSensorOrientationMap</a>.
     * Do note that a dynamically changing sensor orientation value in camera characteristics
     * will not be the best way to establish the orientation per frame. Clients that want to
     * know the sensor orientation of a particular captured frame should query the
     * ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID from the corresponding capture result and
     * check the respective physical camera orientation.</p>
     * <p>Native camera clients must query ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS for the mapping
     * between device state and camera sensor orientation. Dynamic updates to the sensor
     * orientation are not supported in this code path.</p>
     *
     * @see ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS
     * @see ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID
     */
    ACAMERA_SENSOR_ORIENTATION =                                // int32
            ACAMERA_SENSOR_START + 14,
@@ -6284,6 +6306,21 @@ typedef enum acamera_metadata_tag {
     */
    ACAMERA_INFO_VERSION =                                      // byte
            ACAMERA_INFO_START + 1,
    /**
     *
     * <p>Type: int64[2*n]</p>
     *
     * <p>This tag may appear in:
     * <ul>
     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
     * </ul></p>
     *
     * <p>HAL must populate the array with
     * (hardware::camera::provider::V2_5::DeviceState, sensorOrientation) pairs for each
     * supported device state bitwise combination.</p>
     */
    ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS =                    // int64[2*n]
            ACAMERA_INFO_START + 3,
    ACAMERA_INFO_END,

    /**
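For illustration only, not part of this change: a minimal NDK sketch of how a native client could read the new ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS mapping alongside ACAMERA_SENSOR_ORIENTATION. The helper name is hypothetical, the camera ID is assumed to come from ACameraManager_getCameraIdList, and error handling is trimmed.

// Sketch only: prints the (deviceState, sensorOrientation) pairs exposed by
// ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS for one camera.
#include <camera/NdkCameraManager.h>
#include <camera/NdkCameraMetadata.h>
#include <camera/NdkCameraMetadataTags.h>
#include <cstdint>
#include <cstdio>

void printDeviceStateOrientations(ACameraManager* manager, const char* cameraId) {
    ACameraMetadata* chars = nullptr;
    if (ACameraManager_getCameraCharacteristics(manager, cameraId, &chars) != ACAMERA_OK) {
        return;
    }

    ACameraMetadata_const_entry entry = {};
    // Static sensor orientation; on foldables with logical cameras this value may
    // change with the device/fold state, so it should not be cached (see doc above).
    if (ACameraMetadata_getConstEntry(chars, ACAMERA_SENSOR_ORIENTATION, &entry) == ACAMERA_OK) {
        printf("current sensor orientation: %d\n", entry.data.i32[0]);
    }

    // New in API 32: full mapping from device state to sensor orientation,
    // stored as int64 (deviceState, orientation) pairs.
    if (ACameraMetadata_getConstEntry(chars, ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS, &entry)
            == ACAMERA_OK) {
        for (uint32_t i = 0; i + 1 < entry.count; i += 2) {
            printf("device state 0x%llx -> orientation %lld\n",
                   (unsigned long long) entry.data.i64[i],
                   (long long) entry.data.i64[i + 1]);
        }
    }

    ACameraMetadata_free(chars);
}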
+256 −0
@@ -444,6 +444,22 @@ enum {
};
typedef int32_t aaudio_content_type_t;

enum {

    /**
     * Constant indicating the audio content associated with these attributes will follow the
     * default platform behavior with regards to which content will be spatialized or not.
     */
    AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO = 1,

    /**
     * Constant indicating the audio content associated with these attributes should never
     * be spatialized.
     */
    AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER = 2,
};
typedef int32_t aaudio_spatialization_behavior_t;

/**
 * Defines the audio source.
 * An audio source defines both a default physical source of audio signal, and a recording
@@ -565,6 +581,145 @@ enum {
};
typedef int32_t aaudio_session_id_t;

/**
 * Defines the audio channel mask.
 * Channel masks are used to describe the samples and their
 * arrangement in the audio frame. They are also used in the endpoint
 * (e.g. a USB audio interface, a DAC connected to headphones) to
 * specify allowable configurations of a particular device.
 *
 * Added in API level 32.
 */
enum {
    /**
     * Invalid channel mask
     */
    AAUDIO_CHANNEL_INVALID = -1,

    /**
     * Output audio channel mask
     */
    AAUDIO_CHANNEL_FRONT_LEFT = 1 << 0,
    AAUDIO_CHANNEL_FRONT_RIGHT = 1 << 1,
    AAUDIO_CHANNEL_FRONT_CENTER = 1 << 2,
    AAUDIO_CHANNEL_LOW_FREQUENCY = 1 << 3,
    AAUDIO_CHANNEL_BACK_LEFT = 1 << 4,
    AAUDIO_CHANNEL_BACK_RIGHT = 1 << 5,
    AAUDIO_CHANNEL_FRONT_LEFT_OF_CENTER = 1 << 6,
    AAUDIO_CHANNEL_FRONT_RIGHT_OF_CENTER = 1 << 7,
    AAUDIO_CHANNEL_BACK_CENTER = 1 << 8,
    AAUDIO_CHANNEL_SIDE_LEFT = 1 << 9,
    AAUDIO_CHANNEL_SIDE_RIGHT = 1 << 10,
    AAUDIO_CHANNEL_TOP_CENTER = 1 << 11,
    AAUDIO_CHANNEL_TOP_FRONT_LEFT = 1 << 12,
    AAUDIO_CHANNEL_TOP_FRONT_CENTER = 1 << 13,
    AAUDIO_CHANNEL_TOP_FRONT_RIGHT = 1 << 14,
    AAUDIO_CHANNEL_TOP_BACK_LEFT = 1 << 15,
    AAUDIO_CHANNEL_TOP_BACK_CENTER = 1 << 16,
    AAUDIO_CHANNEL_TOP_BACK_RIGHT = 1 << 17,
    AAUDIO_CHANNEL_TOP_SIDE_LEFT = 1 << 18,
    AAUDIO_CHANNEL_TOP_SIDE_RIGHT = 1 << 19,
    AAUDIO_CHANNEL_BOTTOM_FRONT_LEFT = 1 << 20,
    AAUDIO_CHANNEL_BOTTOM_FRONT_CENTER = 1 << 21,
    AAUDIO_CHANNEL_BOTTOM_FRONT_RIGHT = 1 << 22,
    AAUDIO_CHANNEL_LOW_FREQUENCY_2 = 1 << 23,
    AAUDIO_CHANNEL_FRONT_WIDE_LEFT = 1 << 24,
    AAUDIO_CHANNEL_FRONT_WIDE_RIGHT = 1 << 25,

    AAUDIO_CHANNEL_MONO = AAUDIO_CHANNEL_FRONT_LEFT,
    AAUDIO_CHANNEL_STEREO = AAUDIO_CHANNEL_FRONT_LEFT |
                            AAUDIO_CHANNEL_FRONT_RIGHT,
    AAUDIO_CHANNEL_2POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
                             AAUDIO_CHANNEL_FRONT_RIGHT |
                             AAUDIO_CHANNEL_LOW_FREQUENCY,
    AAUDIO_CHANNEL_TRI = AAUDIO_CHANNEL_FRONT_LEFT |
                         AAUDIO_CHANNEL_FRONT_RIGHT |
                         AAUDIO_CHANNEL_FRONT_CENTER,
    AAUDIO_CHANNEL_TRI_BACK = AAUDIO_CHANNEL_FRONT_LEFT |
                              AAUDIO_CHANNEL_FRONT_RIGHT |
                              AAUDIO_CHANNEL_BACK_CENTER,
    AAUDIO_CHANNEL_3POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
                             AAUDIO_CHANNEL_FRONT_RIGHT |
                             AAUDIO_CHANNEL_FRONT_CENTER |
                             AAUDIO_CHANNEL_LOW_FREQUENCY,
    AAUDIO_CHANNEL_2POINT0POINT2 = AAUDIO_CHANNEL_FRONT_LEFT |
                                   AAUDIO_CHANNEL_FRONT_RIGHT |
                                   AAUDIO_CHANNEL_TOP_SIDE_LEFT |
                                   AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
    AAUDIO_CHANNEL_2POINT1POINT2 = AAUDIO_CHANNEL_2POINT0POINT2 |
                                   AAUDIO_CHANNEL_LOW_FREQUENCY,
    AAUDIO_CHANNEL_3POINT0POINT2 = AAUDIO_CHANNEL_FRONT_LEFT |
                                   AAUDIO_CHANNEL_FRONT_RIGHT |
                                   AAUDIO_CHANNEL_FRONT_CENTER |
                                   AAUDIO_CHANNEL_TOP_SIDE_LEFT |
                                   AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
    AAUDIO_CHANNEL_3POINT1POINT2 = AAUDIO_CHANNEL_3POINT0POINT2 |
                                   AAUDIO_CHANNEL_LOW_FREQUENCY,
    AAUDIO_CHANNEL_QUAD = AAUDIO_CHANNEL_FRONT_LEFT |
                          AAUDIO_CHANNEL_FRONT_RIGHT |
                          AAUDIO_CHANNEL_BACK_LEFT |
                          AAUDIO_CHANNEL_BACK_RIGHT,
    AAUDIO_CHANNEL_QUAD_SIDE = AAUDIO_CHANNEL_FRONT_LEFT |
                               AAUDIO_CHANNEL_FRONT_RIGHT |
                               AAUDIO_CHANNEL_SIDE_LEFT |
                               AAUDIO_CHANNEL_SIDE_RIGHT,
    AAUDIO_CHANNEL_SURROUND = AAUDIO_CHANNEL_FRONT_LEFT |
                              AAUDIO_CHANNEL_FRONT_RIGHT |
                              AAUDIO_CHANNEL_FRONT_CENTER |
                              AAUDIO_CHANNEL_BACK_CENTER,
    AAUDIO_CHANNEL_PENTA = AAUDIO_CHANNEL_QUAD |
                           AAUDIO_CHANNEL_FRONT_CENTER,
    // aka 5POINT1_BACK
    AAUDIO_CHANNEL_5POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
                             AAUDIO_CHANNEL_FRONT_RIGHT |
                             AAUDIO_CHANNEL_FRONT_CENTER |
                             AAUDIO_CHANNEL_LOW_FREQUENCY |
                             AAUDIO_CHANNEL_BACK_LEFT |
                             AAUDIO_CHANNEL_BACK_RIGHT,
    AAUDIO_CHANNEL_5POINT1_SIDE = AAUDIO_CHANNEL_FRONT_LEFT |
                                  AAUDIO_CHANNEL_FRONT_RIGHT |
                                  AAUDIO_CHANNEL_FRONT_CENTER |
                                  AAUDIO_CHANNEL_LOW_FREQUENCY |
                                  AAUDIO_CHANNEL_SIDE_LEFT |
                                  AAUDIO_CHANNEL_SIDE_RIGHT,
    AAUDIO_CHANNEL_6POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
                             AAUDIO_CHANNEL_FRONT_RIGHT |
                             AAUDIO_CHANNEL_FRONT_CENTER |
                             AAUDIO_CHANNEL_LOW_FREQUENCY |
                             AAUDIO_CHANNEL_BACK_LEFT |
                             AAUDIO_CHANNEL_BACK_RIGHT |
                             AAUDIO_CHANNEL_BACK_CENTER,
    AAUDIO_CHANNEL_7POINT1 = AAUDIO_CHANNEL_5POINT1 |
                             AAUDIO_CHANNEL_SIDE_LEFT |
                             AAUDIO_CHANNEL_SIDE_RIGHT,
    AAUDIO_CHANNEL_5POINT1POINT2 = AAUDIO_CHANNEL_5POINT1 |
                                   AAUDIO_CHANNEL_TOP_SIDE_LEFT |
                                   AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
    AAUDIO_CHANNEL_5POINT1POINT4 = AAUDIO_CHANNEL_5POINT1 |
                                   AAUDIO_CHANNEL_TOP_FRONT_LEFT |
                                   AAUDIO_CHANNEL_TOP_FRONT_RIGHT |
                                   AAUDIO_CHANNEL_TOP_BACK_LEFT |
                                   AAUDIO_CHANNEL_TOP_BACK_RIGHT,
    AAUDIO_CHANNEL_7POINT1POINT2 = AAUDIO_CHANNEL_7POINT1 |
                                   AAUDIO_CHANNEL_TOP_SIDE_LEFT |
                                   AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
    AAUDIO_CHANNEL_7POINT1POINT4 = AAUDIO_CHANNEL_7POINT1 |
                                   AAUDIO_CHANNEL_TOP_FRONT_LEFT |
                                   AAUDIO_CHANNEL_TOP_FRONT_RIGHT |
                                   AAUDIO_CHANNEL_TOP_BACK_LEFT |
                                   AAUDIO_CHANNEL_TOP_BACK_RIGHT,
    AAUDIO_CHANNEL_9POINT1POINT4 = AAUDIO_CHANNEL_7POINT1POINT4 |
                                   AAUDIO_CHANNEL_FRONT_WIDE_LEFT |
                                   AAUDIO_CHANNEL_FRONT_WIDE_RIGHT,
    AAUDIO_CHANNEL_9POINT1POINT6 = AAUDIO_CHANNEL_9POINT1POINT4 |
                                   AAUDIO_CHANNEL_TOP_SIDE_LEFT |
                                   AAUDIO_CHANNEL_TOP_SIDE_RIGHT,

    AAUDIO_CHANNEL_FRONT_BACK = AAUDIO_CHANNEL_FRONT_CENTER |
                                AAUDIO_CHANNEL_BACK_CENTER,
};
typedef uint32_t aaudio_channel_mask_t;

typedef struct AAudioStreamStruct         AAudioStream;
typedef struct AAudioStreamBuilderStruct  AAudioStreamBuilder;

@@ -699,6 +854,11 @@ AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder* builder,
 * If an exact value is specified then an opened stream will use that value.
 * If a stream cannot be opened with the specified value then the open will fail.
 *
 * As the channel count provided here may be different from the corresponding channel count
 * of channel mask used in {@link AAudioStreamBuilder_setChannelMask}, the last called function
 * will be respected if both this function and {@link AAudioStreamBuilder_setChannelMask} are
 * called.
 *
 * Available since API level 26.
 *
 * @param builder reference provided by AAudio_createStreamBuilder()
@@ -714,6 +874,8 @@ AAUDIO_API void AAudioStreamBuilder_setChannelCount(AAudioStreamBuilder* builder
 *
 * @param builder reference provided by AAudio_createStreamBuilder()
 * @param samplesPerFrame Number of samples in a frame.
 *
 * @deprecated use {@link AAudioStreamBuilder_setChannelCount}
 */
AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
                                                       int32_t samplesPerFrame) __INTRODUCED_IN(26);
@@ -835,6 +997,37 @@ AAUDIO_API void AAudioStreamBuilder_setUsage(AAudioStreamBuilder* builder,
AAUDIO_API void AAudioStreamBuilder_setContentType(AAudioStreamBuilder* builder,
        aaudio_content_type_t contentType) __INTRODUCED_IN(28);

/**
 * Sets the behavior affecting whether spatialization will be used.
 *
 * The AAudio system will use this information to select whether the stream will go
 * through a spatializer effect or not when the effect is supported and enabled.
 *
 * Available since API level 32.
 *
 * @param builder reference provided by AAudio_createStreamBuilder()
 * @param spatializationBehavior the desired behavior with regards to spatialization, eg.
 *     {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO}
 */
AAUDIO_API void AAudioStreamBuilder_setSpatializationBehavior(AAudioStreamBuilder* builder,
        aaudio_spatialization_behavior_t spatializationBehavior) __INTRODUCED_IN(32);

/**
 * Specifies whether the audio data of this output stream has already been processed for
 * spatialization.
 *
 * If the stream has been processed for spatialization, setting this to true will prevent
 * issues such as double-processing on platforms that will spatialize audio data.
 *
 * Available since API level 32.
 *
 * @param builder reference provided by AAudio_createStreamBuilder()
 * @param isSpatialized true if the content is already processed for binaural or transaural spatial
 *     rendering, false otherwise.
 */
AAUDIO_API void AAudioStreamBuilder_setIsContentSpatialized(AAudioStreamBuilder* builder,
        bool isSpatialized) __INTRODUCED_IN(32);

/**
 * Set the input (capture) preset for the stream.
 *
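For illustration only, not part of the patch: a minimal sketch of a builder that uses the two new spatialization setters for content that was already rendered binaurally offline. The function name is hypothetical and error handling is trimmed.

// Sketch: mark an output stream as already spatialized so the platform
// spatializer does not process it a second time.
#include <aaudio/AAudio.h>

aaudio_result_t openPrespatializedStream(AAudioStream** streamOut) {
    AAudioStreamBuilder* builder = nullptr;
    aaudio_result_t result = AAudio_createStreamBuilder(&builder);
    if (result != AAUDIO_OK) return result;

    AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_OUTPUT);
    // The content is already binaural, so opt out of platform spatialization
    // and flag the data accordingly.
    AAudioStreamBuilder_setSpatializationBehavior(builder, AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER);
    AAudioStreamBuilder_setIsContentSpatialized(builder, true);

    result = AAudioStreamBuilder_openStream(builder, streamOut);
    AAudioStreamBuilder_delete(builder);
    return result;
}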
@@ -1136,6 +1329,32 @@ AAUDIO_API aaudio_result_t AAudioStreamBuilder_openStream(AAudioStreamBuilder*
AAUDIO_API aaudio_result_t  AAudioStreamBuilder_delete(AAudioStreamBuilder* builder)
    __INTRODUCED_IN(26);

/**
 * Set audio channel mask for the stream.
 *
 * The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED}.
 * If both channel mask and count are not set, then stereo will then be chosen when the
 * stream is opened.
 * After opening a stream with an unspecified value, the application must query for the
 * actual value, which may vary by device.
 *
 * If an exact value is specified then an opened stream will use that value.
 * If a stream cannot be opened with the specified value then the open will fail.
 *
 * As the corresponding channel count of provided channel mask here may be different
 * from the channel count used in {@link AAudioStreamBuilder_setChannelCount} or
 * {@link AAudioStreamBuilder_setSamplesPerFrame}, the last called function will be
 * respected if this function and {@link AAudioStreamBuilder_setChannelCount} or
 * {@link AAudioStreamBuilder_setSamplesPerFrame} are called.
 *
 * Available since API level 32.
 *
 * @param builder reference provided by AAudio_createStreamBuilder()
 * @param channelMask Audio channel mask desired.
 */
AAUDIO_API void AAudioStreamBuilder_setChannelMask(AAudioStreamBuilder* builder,
        aaudio_channel_mask_t channelMask) __INTRODUCED_IN(32);

// ============================================================
// Stream Control
// ============================================================
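For illustration only, not part of the patch: a small sketch of the new channel mask setter together with the last-call-wins rule documented above, plus a hypothetical helper that derives the samples-per-frame count from a positional mask by counting set bits.

// Sketch: request a 5.1 output layout by channel mask. Because
// AAudioStreamBuilder_setChannelMask() is the last channel-related call,
// it takes precedence over the earlier AAudioStreamBuilder_setChannelCount().
#include <aaudio/AAudio.h>
#include <bitset>

void configureChannels(AAudioStreamBuilder* builder) {
    AAudioStreamBuilder_setChannelCount(builder, 2);                      // overridden by the next call
    AAudioStreamBuilder_setChannelMask(builder, AAUDIO_CHANNEL_5POINT1);  // last call wins
}

// Hypothetical helper: each set bit of a positional mask is one channel, so the
// popcount gives the samples per frame (e.g. 10 for AAUDIO_CHANNEL_5POINT1POINT4).
int channelCountFromMask(aaudio_channel_mask_t mask) {
    if (mask == static_cast<aaudio_channel_mask_t>(AAUDIO_CHANNEL_INVALID) ||
            mask == AAUDIO_UNSPECIFIED) {
        return 0;
    }
    return static_cast<int>(std::bitset<32>(mask).count());
}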
@@ -1615,6 +1834,31 @@ AAUDIO_API aaudio_usage_t AAudioStream_getUsage(AAudioStream* stream) __INTRODUC
AAUDIO_API aaudio_content_type_t AAudioStream_getContentType(AAudioStream* stream)
        __INTRODUCED_IN(28);

/**
 * Return the spatialization behavior for the stream.
 *
 * If none was explicitly set, it will return the default
 * {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO} behavior.
 *
 * Available since API level 32.
 *
 * @param stream reference provided by AAudioStreamBuilder_openStream()
 * @return spatialization behavior, for example {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO}
 */
AAUDIO_API aaudio_spatialization_behavior_t AAudioStream_getSpatializationBehavior(
        AAudioStream* stream) __INTRODUCED_IN(32);

/**
 * Return whether the content of the stream is spatialized.
 *
 * Available since API level 32.
 *
 * @param stream reference provided by AAudioStreamBuilder_openStream()
 * @return true if the content is spatialized
 */
AAUDIO_API bool AAudioStream_isContentSpatialized(AAudioStream* stream) __INTRODUCED_IN(32);


/**
 * Return the input preset for the stream.
 *
@@ -1652,6 +1896,18 @@ AAUDIO_API aaudio_allowed_capture_policy_t AAudioStream_getAllowedCapturePolicy(
AAUDIO_API bool AAudioStream_isPrivacySensitive(AAudioStream* stream)
        __INTRODUCED_IN(30);

/**
 * Return the channel mask for the stream. This will be the mask set using
 * {@link #AAudioStreamBuilder_setChannelMask}, or {@link #AAUDIO_UNSPECIFIED} otherwise.
 *
 * Available since API level 32.
 *
 * @param stream reference provided by AAudioStreamBuilder_openStream()
 * @return actual channel mask
 */
AAUDIO_API aaudio_channel_mask_t AAudioStream_getChannelMask(AAudioStream* stream)
        __INTRODUCED_IN(32);

#ifdef __cplusplus
}
#endif
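For illustration only, not part of the patch: a sketch that reads the new stream getters on an already opened stream; the logging helper is hypothetical.

// Sketch: log the spatialization- and channel-related properties of an
// opened AAudio stream.
#include <aaudio/AAudio.h>
#include <cstdio>

void logSpatialProperties(AAudioStream* stream) {
    aaudio_spatialization_behavior_t behavior = AAudioStream_getSpatializationBehavior(stream);
    bool spatialized = AAudioStream_isContentSpatialized(stream);
    aaudio_channel_mask_t mask = AAudioStream_getChannelMask(stream);

    printf("spatialization behavior: %d (%s)\n", behavior,
           behavior == AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER ? "never" : "auto");
    printf("content already spatialized: %s\n", spatialized ? "yes" : "no");
    printf("channel mask: 0x%x\n", static_cast<unsigned>(mask));
}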
+9 −2
@@ -30,7 +30,7 @@ using namespace aaudio;
using android::media::audio::common::AudioFormat;

AAudioStreamConfiguration::AAudioStreamConfiguration(const StreamParameters& parcelable) {
-    setSamplesPerFrame(parcelable.samplesPerFrame);
+    setChannelMask(parcelable.channelMask);
    setSampleRate(parcelable.sampleRate);
    setDeviceId(parcelable.deviceId);
    static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(parcelable.sharingMode));
@@ -43,6 +43,13 @@ AAudioStreamConfiguration::AAudioStreamConfiguration(const StreamParameters& par
    setUsage(parcelable.usage);
    static_assert(sizeof(aaudio_content_type_t) == sizeof(parcelable.contentType));
    setContentType(parcelable.contentType);

    static_assert(sizeof(aaudio_spatialization_behavior_t) ==
            sizeof(parcelable.spatializationBehavior));
    setSpatializationBehavior(parcelable.spatializationBehavior);
    setIsContentSpatialized(parcelable.isContentSpatialized);


    static_assert(sizeof(aaudio_input_preset_t) == sizeof(parcelable.inputPreset));
    setInputPreset(parcelable.inputPreset);
    setBufferCapacity(parcelable.bufferCapacity);
@@ -63,7 +70,7 @@ AAudioStreamConfiguration::operator=(const StreamParameters& parcelable) {

StreamParameters AAudioStreamConfiguration::parcelable() const {
    StreamParameters result;
-    result.samplesPerFrame = getSamplesPerFrame();
+    result.channelMask = getChannelMask();
    result.sampleRate = getSampleRate();
    result.deviceId = getDeviceId();
    static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(result.sharingMode));