Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit eff68753 authored by Treehugger Robot, committed by Gerrit Code Review
Browse files

Merge "Non ABI changes to neuralnetworks HAL"

parents 11cee080 a02e5e51
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -775,8 +775,9 @@ a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardwar
6017b4f2481feb0fffceae81c62bc372c898998b2d8fe69fbd39859d3a315e5e android.hardware.keymaster@4.0::IKeymasterDevice
dabe23dde7c9e3ad65c61def7392f186d7efe7f4216f9b6f9cf0863745b1a9f4 android.hardware.keymaster@4.1::IKeymasterDevice
cd84ab19c590e0e73dd2307b591a3093ee18147ef95e6d5418644463a6620076 android.hardware.neuralnetworks@1.2::IDevice
9625e85f56515ad2cf87b6a1847906db669f746ea4ab02cd3d4ca25abc9b0109 android.hardware.neuralnetworks@1.2::types
9e758e208d14f7256e0885d6d8ad0b61121b21d8c313864f981727ae55bffd16 android.hardware.neuralnetworks@1.3::types
f729ee6a5f136b25d79ea6895d24700fce413df555baaecf2c39e4440d15d043 android.hardware.neuralnetworks@1.0::types
c6ae443608502339aec4256feef48e7b2d36f7477ca5361cc95cd27a8ed9c612 android.hardware.neuralnetworks@1.2::types
9fe5a4093043c2b5da4e9491aed1646c388a5d3059b8fd77d5b6a9807e6d3a3e android.hardware.neuralnetworks@1.3::types
e8c86c69c438da8d1549856c1bb3e2d1b8da52722f8235ff49a30f2cce91742c android.hardware.soundtrigger@2.1::ISoundTriggerHwCallback
b9fbb6e2e061ed0960939d48b785e9700210add1f13ed32ecd688d0f1ca20ef7 android.hardware.renderscript@1.0::types
0f53d70e1eadf8d987766db4bf6ae2048004682168f4cab118da576787def3fa android.hardware.radio@1.0::types
+3 −2
Original line number Diff line number Diff line
@@ -308,8 +308,9 @@ enum OperationType : int32_t {
     * Outputs:
     * * 0: The output 4-D tensor, of shape
     *      [batches, out_height, out_width, depth_out].
     *      For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the following condition must be satisfied: output_scale > input_scale * filter_scale
     *      For output tensor of
     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition must
     *      be satisfied: output_scale > input_scale * filter_scale
     */
    CONV_2D = 3,

+5 −3
Original line number Diff line number Diff line
@@ -314,7 +314,8 @@ enum OperationType : int32_t {
     *      tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
     *      Since HAL version 1.2, for a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
     *      the scale and zeroPoint values can be different from
     *      input tensors. Before HAL version 1.2 they have to be the same as for the input tensors.
     *      input tensors. Before HAL version 1.2 they have to be the same as for the
     *      input tensors.
     */
    CONCATENATION = @1.1::OperationType:CONCATENATION,

@@ -460,8 +461,9 @@ enum OperationType : int32_t {
     * Outputs:
     * * 0: The output 4-D tensor, of shape
     *      [batches, out_height, out_width, depth_out].
     *      Before HAL version 1.2, for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the following condition must be satisfied: output_scale > input_scale * filter_scale
     *      Before HAL version 1.2, for output tensor of
     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition must
     *      be satisfied: output_scale > input_scale * filter_scale
     */
    CONV_2D = @1.1::OperationType:CONV_2D,

+13 −7
Original line number Diff line number Diff line
@@ -263,7 +263,8 @@ enum OperationType : int32_t {
     *      tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
     *      Since HAL version 1.2, for a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
     *      the scale and zeroPoint values can be different from
     *      input tensors. Before HAL version 1.2 they have to be the same as for the input tensors.
     *      input tensors. Before HAL version 1.2 they have to be the same as for the
     *      input tensors.
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
     *      the scale and zeroPoint values can be different from input tensors.
     */
@@ -312,7 +313,8 @@ enum OperationType : int32_t {
     * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
     * * * input.scale * filter.scale).
     *
     * * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
     * * Quantized signed with filter symmetric per channel quantization
     *   (since HAL version 1.3):
     * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
     * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
     * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
@@ -425,8 +427,9 @@ enum OperationType : int32_t {
     * Outputs:
     * * 0: The output 4-D tensor, of shape
     *      [batches, out_height, out_width, depth_out].
     *      Before HAL version 1.2, for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the following condition must be satisfied: output_scale > input_scale * filter_scale
     *      Before HAL version 1.2, for output tensor of
     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition must
     *      be satisfied: output_scale > input_scale * filter_scale
     */
    CONV_2D = @1.2::OperationType:CONV_2D,

@@ -477,7 +480,8 @@ enum OperationType : int32_t {
     * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
     * * * input.scale * filter.scale).
     *
     * * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
     * * Quantized signed with filter symmetric per channel quantization
     *   (since HAL version 1.3):
     * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
     * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
     * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
@@ -3354,7 +3358,8 @@ enum OperationType : int32_t {
     * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
     * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
     *
     * * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
     * * Quantized signed with filter symmetric per channel quantization
     *   (since HAL version 1.3):
     * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
     * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
     * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
@@ -4615,7 +4620,8 @@ enum OperationType : int32_t {
     * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
     * * * input.scale * filter.scale).
     *
     * * Quantized signed with filter symmetric per channel quantization (since HAL version 1.3):
     * * Quantized signed with filter symmetric per channel quantization
     *   (since HAL version 1.3):
     * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
     * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
     * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,