
Commit 6744b76e authored by TreeHugger Robot, committed by Android (Google) Code Review

Merge "NNAPI: Update docs for ops with signed quantization"

parents f53015c8 eb7f1756
+3 −3
@@ -588,11 +588,11 @@ d3a344b7bd4c0d2658ae7209f55a979b8f53f361fd00f4fca29d5baa56d11fd2 android.hardwar
cd06a7911b9acd4a653bbf7133888878fbcb3f84be177c7a3f1becaae3d8618f android.hardware.camera.metadata@3.2::types
b69a7615c508acf5c5201efd1bfa3262167874fc3594e2db5a3ff93addd8ac75 android.hardware.keymaster@4.0::IKeymasterDevice
eb2fa0c883c2185d514be0b84c179b283753ef0c1b77b45b4f359bd23bba8b75 android.hardware.neuralnetworks@1.0::IPreparedModel
f1109cbb10297b7429a11fab42afa912710b303c9bf20bd5cdb8bd57b9c84186 android.hardware.neuralnetworks@1.0::types
8eac60e1f724d141c71c69f06d4544acb720a55dfbbcd97fa01bb3d25ee4e2f5 android.hardware.neuralnetworks@1.0::types
5f6d3097ba84cb63c430787123f4de1b31c11f90b531b98eae9a8623a5ae962a android.hardware.neuralnetworks@1.1::types
fb382e986c10b8fbb797a8546e8f9ea6d1107bfe6f3fb7e57f6bbbf1f807a906 android.hardware.neuralnetworks@1.2::IDevice
40e71cd693de5b832325c5d8f081f2ff20a7ba2b89d401cee5b4b3eb0e241681 android.hardware.neuralnetworks@1.2::IPreparedModel
2d5483fbf59d5fd2de94665a6df05da5c3d09de67561d0db5e9f09e59e9aea46 android.hardware.neuralnetworks@1.2::types
7f7ef383268c95a1b8fe4e55c662bc806bb0ac11a154f6b049a113a44b0f024f android.hardware.neuralnetworks@1.2::types
a785a57447a81e9c130eef6904c3a5c256076c6a04588c40620ebd6fa2660d77 android.hardware.radio@1.2::types
1a6e2bd289f22931c526b21916910f1d4c436b7acb9556e4243de4ce8e6cc2e4 android.hardware.soundtrigger@2.0::ISoundTriggerHwCallback
fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
@@ -648,7 +648,7 @@ a3eddd9bbdc87e8c22764070037dd1154f1cf006e6fba93364c4f85d4c134a19 android.hardwar
9e59fffceed0dd72a9799e04505db5f777bbbea1af0695ba4107ef6d967c6fda android.hardware.neuralnetworks@1.3::IDevice
258825966435b3ed08832055bb736d81516013e405f161d9ccde9a90cfcdde83 android.hardware.neuralnetworks@1.3::IPreparedModel
94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
cf1d55e8c68300090747ab90b94c22e4c859b29c84ced68a317c595bb115eab2 android.hardware.neuralnetworks@1.3::types
35668befe89fc7f84d58fc1dab7dd3e4d6067c7eeccbae154fe36cd964dfaef7 android.hardware.neuralnetworks@1.3::types
3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
9bc274c9d73aae170fd9e18df2476ade4c19b629cfb38dd03dd237a6cc2d932b android.hardware.wifi.hostapd@1.2::IHostapd
11f6448d15336361180391c8ebcdfd2d7cf77b3782d577e594d583aadc9c2877 android.hardware.wifi.hostapd@1.2::types
+12 −13
@@ -261,8 +261,8 @@ enum OperationType : int32_t {
     *      filter.
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link OperandType::TENSOR_FLOAT32}
     *      the bias must be of the same
     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias must be of the same type.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale.
     * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
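The bias rule restated in the hunk above (a TENSOR_INT32 bias with zeroPoint 0 and bias_scale == input_scale * filter_scale when the filter is TENSOR_QUANT8_ASYMM) reduces to a plain scale-and-round. A minimal C++ sketch for illustration only; the function name and parameters are invented here and are not part of the HAL or of this change:

    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Illustrative sketch: quantize a float bias vector for a convolution-style
    // op whose input and filter are TENSOR_QUANT8_ASYMM. Per the doc above, the
    // bias operand is TENSOR_INT32 with zeroPoint 0 and
    // bias_scale == input_scale * filter_scale.
    std::vector<int32_t> quantizeBias(const std::vector<float>& biasFloat,
                                      float inputScale, float filterScale) {
        const float biasScale = inputScale * filterScale;  // required relationship
        std::vector<int32_t> biasQuant;
        biasQuant.reserve(biasFloat.size());
        for (float b : biasFloat) {
            // zeroPoint is 0, so this is a plain scale-and-round.
            biasQuant.push_back(static_cast<int32_t>(std::lround(b / biasScale)));
        }
        return biasQuant;
    }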
@@ -290,7 +290,8 @@ enum OperationType : int32_t {
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link OperandType::TENSOR_FLOAT32}
     *      the bias must be of the same
     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      type.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale.
     * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
@@ -355,8 +356,8 @@ enum OperationType : int32_t {
     *      specifying the filter.
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link OperandType::TENSOR_FLOAT32}
     *      the bias must be of the same
     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias must be of the same type.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale.
     * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -384,8 +385,8 @@ enum OperationType : int32_t {
     *      specifying the filter.
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link OperandType::TENSOR_FLOAT32}
     *      the bias must be of the same
     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias must be of the same type.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale.
     * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
@@ -492,8 +493,6 @@ enum OperationType : int32_t {
     *
     * Supported value tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_INT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported value tensor rank: from 2
     *
@@ -556,10 +555,10 @@ enum OperationType : int32_t {
     *      of output nodes.
     * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
     *      tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
     *      also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
     *      of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
     *      of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
     *      bias_scale == input_scale * filter_scale.
     *      also be of {@link OperandType::TENSOR_FLOAT32}.
     *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias should be of {@link OperandType::TENSOR_INT32},
     *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
     * * 3: An {@link OperandType::INT32} scalar, and has to be one of the
     *      {@link FusedActivationFunc} values. Specifies the activation to
     *      invoke on the result.
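At the NDK level (NeuralNetworks.h), which mirrors these operand rules, the same bias convention shows up when declaring a quantized FULLY_CONNECTED. The sketch below is illustrative only: the dimensions, scales and zero points are made-up values, error handling and setting the operand values are omitted, and nothing in it is part of this change:

    #include <android/NeuralNetworks.h>

    void buildQuantizedFullyConnected(ANeuralNetworksModel* model) {
        const uint32_t inputDims[]  = {1, 16};  // [batch_size, input_size]
        const uint32_t weightDims[] = {8, 16};  // [num_units, input_size]
        const uint32_t biasDims[]   = {8};      // [num_units]
        const uint32_t outputDims[] = {1, 8};

        const float inputScale = 0.5f, weightScale = 0.25f;

        // ANeuralNetworksOperandType is {type, dimensionCount, dimensions, scale, zeroPoint}.
        ANeuralNetworksOperandType input = {
            ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 2, inputDims, inputScale, 128};
        ANeuralNetworksOperandType weights = {
            ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 2, weightDims, weightScale, 0};
        // Per the doc above: TENSOR_INT32 bias, zeroPoint 0,
        // bias_scale == input_scale * filter_scale.
        ANeuralNetworksOperandType bias = {
            ANEURALNETWORKS_TENSOR_INT32, 1, biasDims, inputScale * weightScale, 0};
        ANeuralNetworksOperandType activation = {
            ANEURALNETWORKS_INT32, 0, nullptr, 0.0f, 0};
        ANeuralNetworksOperandType output = {
            ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 2, outputDims, 1.0f, 0};

        ANeuralNetworksModel_addOperand(model, &input);       // operand 0
        ANeuralNetworksModel_addOperand(model, &weights);     // operand 1
        ANeuralNetworksModel_addOperand(model, &bias);        // operand 2
        ANeuralNetworksModel_addOperand(model, &activation);  // operand 3 (FusedActivationFunc)
        ANeuralNetworksModel_addOperand(model, &output);      // operand 4

        const uint32_t inputs[]  = {0, 1, 2, 3};
        const uint32_t outputs[] = {4};
        ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_FULLY_CONNECTED,
                                          4, inputs, 1, outputs);
    }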
+45 −38
@@ -375,8 +375,8 @@ enum OperationType : int32_t {
     *      must be set to 0.
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link OperandType::TENSOR_FLOAT32}
     *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
@@ -425,7 +425,8 @@ enum OperationType : int32_t {
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link OperandType::TENSOR_FLOAT32}
     *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      type.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
@@ -523,8 +524,8 @@ enum OperationType : int32_t {
     *      must be set to 3.
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link OperandType::TENSOR_FLOAT32}
     *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
@@ -569,8 +570,8 @@ enum OperationType : int32_t {
     *      specifying the filter.
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link OperandType::TENSOR_FLOAT32}
     *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
@@ -705,8 +706,8 @@ enum OperationType : int32_t {
     *
     * Supported value tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_INT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     * * {@link OperandType::TENSOR_INT32} (since HAL version 1.2)
     * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
     *
     * Supported value tensor rank: from 2
     *
@@ -772,10 +773,10 @@ enum OperationType : int32_t {
     *      of output nodes.
     * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
     *      tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
     *      also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
     *      of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
     *      of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
     *      bias_scale == input_scale * filter_scale.
     *      also be of {@link OperandType::TENSOR_FLOAT32}.
     *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias should be of {@link OperandType::TENSOR_INT32},
     *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
     * * 3: An {@link OperandType::INT32} scalar, and has to be one of the
     *      {@link FusedActivationFunc} values. Specifies the activation to
     *      invoke on the result.
@@ -2659,7 +2660,8 @@ enum OperationType : int32_t {
     *      order of the boxes corresponds with input0. For input0 of type
     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
     *      {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
     *      scale of 0.125. Zero num_rois is supported for this tensor.
     *      scale of 0.125.
     *      Zero num_rois is supported for this tensor.
     * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
     *      [num_rois], specifying the batch index of each box. Boxes with
     *      the same batch index are grouped together.
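For the quantized case mentioned above, box coordinates are carried as TENSOR_QUANT16_ASYMM with a fixed zeroPoint of 0 and scale of 0.125, i.e. eighth-of-a-pixel steps. A small illustrative C++ sketch of that encoding; the function name is invented and not part of the HAL:

    #include <cmath>
    #include <cstdint>

    // Encode one float box coordinate as TENSOR_QUANT16_ASYMM with
    // zeroPoint 0 and scale 0.125, as the doc above requires.
    uint16_t encodeRoiCoord(float coord) {
        const float kScale = 0.125f;
        long q = std::lround(coord / kScale);  // zeroPoint is 0, nothing to add
        if (q < 0) q = 0;                      // clamp to the uint16 range
        if (q > 65535) q = 65535;
        return static_cast<uint16_t>(q);
    }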
@@ -2686,6 +2688,7 @@ enum OperationType : int32_t {
     *      [num_output_rois], specifying the score of each output box. The boxes
     *      are grouped by batches, but the sequential order in each batch is not
     *      guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM}
     *      the scale and zero point must be the same as input0.
     * * 1: A 2-D Tensor of the same {@link OperandType} as input1, with shape
     *      [num_output_rois, 4], specifying the coordinates of each
@@ -2703,7 +2706,7 @@ enum OperationType : int32_t {
    BOX_WITH_NMS_LIMIT = 44,

    /**
     * Casts a tensor to a new type.
     * Casts a tensor to a type.
     *
     * This operation ignores the scale and zeroPoint of quantized tensors,
     * e.g. it treats a {@link OperandType::TENSOR_QUANT8_ASYMM} input
@@ -3141,8 +3144,8 @@ enum OperationType : int32_t {
     *      {@link SymmPerChannelQuantParams}) must be set to 0.
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
     *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
     *      of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
@@ -3181,7 +3184,8 @@ enum OperationType : int32_t {
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
     *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
     *      type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
     *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
     *      of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
@@ -3661,21 +3665,24 @@ enum OperationType : int32_t {
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0.
     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
     *      the scale and zeroPoint can be diffent from the input0 scale and zeroPoint.
     *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
     */
    PRELU = 71,

    /**
     * Quantizes the input tensor.
     *
     * The formula is:
     * The formula for {@link OperandType::TENSOR_QUANT8_ASYMM} output tensor is:
     *
     *     output = max(0, min(255, round(input / scale) + zeroPoint))
     *
     * Supported tensor {@link OperandType}:
     * Supported input tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT16}
     * * {@link OperandType::TENSOR_FLOAT32}
     *
     * Supported output tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported tensor rank: from 1
     *
     * Inputs:
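The QUANTIZE formula quoted above, output = max(0, min(255, round(input / scale) + zeroPoint)), is straightforward to mirror in code. A minimal illustrative C++ sketch; the function name is invented:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Quantize one float value to TENSOR_QUANT8_ASYMM using the output
    // tensor's scale and zeroPoint, clamping to the uint8 range.
    uint8_t quantizeAsymm8(float input, float scale, int32_t zeroPoint) {
        const int32_t q = static_cast<int32_t>(std::lround(input / scale)) + zeroPoint;
        return static_cast<uint8_t>(std::max<int32_t>(0, std::min<int32_t>(255, q)));
    }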
@@ -4325,15 +4332,15 @@ enum OperationType : int32_t {
     *      dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
     *      {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
     *      same type. For input tensor of type
     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
     *      of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
     *      bias_scale == input_scale * filter_scale. For filter tensor of
     *      {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
     *      must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
     *      0 and bias_scale of 0. The actual scale of each value 'i' is equal
     *      to bias_scale[i] = input_scale * filter_scale[i].
     *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the
     *      same type.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias should be of {@link OperandType::TENSOR_INT32},
     *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
     *      the bias must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
     *      bias_scale[i] = input_scale * filter_scale[i].
     * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
     *      the left, in the ‘width’ dimension.
     * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -4363,14 +4370,14 @@ enum OperationType : int32_t {
     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
     *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
     *      {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
     *      same type. For input tensor of type
     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
     *      of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
     *      bias_scale == input_scale * filter_scale. For filter tensor of
     *      {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
     *      must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
     *      0 and bias_scale of 0. The actual scale of each value 'i' is equal
     *      to bias_scale[i] = input_scale * filter_scale[i].
     *      same type.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the bias should be of {@link OperandType::TENSOR_INT32},
     *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
     *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
     *      the bias must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
     *      and bias_scale of 0. The actual scale of each value 'i' is equal to
     *      bias_scale[i] = input_scale * filter_scale[i].
     * * 3: An {@link OperandType::TENSOR_INT32} tensor, specifying the output
     *      tensor shape.
     * * 4: An {@link OperandType::INT32} scalar, specifying the implicit
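For the per-channel filter case restated above (TENSOR_QUANT8_SYMM_PER_CHANNEL), the bias is still TENSOR_INT32 with zeroPoint 0, but each element i is effectively scaled by bias_scale[i] = input_scale * filter_scale[i]. An illustrative C++ sketch of that per-channel quantization; names and parameters are invented, not part of the HAL:

    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Quantize a per-output-channel float bias. The bias operand's declared
    // scale is 0; the effective scale of element i is
    // input_scale * filter_scale[i], per the doc above.
    std::vector<int32_t> quantizePerChannelBias(const std::vector<float>& biasFloat,
                                                float inputScale,
                                                const std::vector<float>& filterScales) {
        std::vector<int32_t> biasQuant(biasFloat.size());
        for (size_t i = 0; i < biasFloat.size(); ++i) {
            const float biasScale = inputScale * filterScales[i];
            biasQuant[i] = static_cast<int32_t>(std::lround(biasFloat[i] / biasScale));
        }
        return biasQuant;
    }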
+237 −74

Preview size limit exceeded, changes collapsed.