Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 75ba0bfc authored by Slava Shklyaev's avatar Slava Shklyaev Committed by android-build-merger
Browse files

Sync docs with NeuralNetworks.h

am: af528bc5

Change-Id: I0944a6d69463ede34dbee21e52423229909a457f
parents fdf9aed0 af528bc5
Loading
Loading
Loading
Loading
+4 −2
Original line number Diff line number Diff line
@@ -387,8 +387,10 @@ cd4330c3196bda1d642a32abfe23a7d64ebfbda721940643af6867af3b3f0aa9 android.hardwar
# ABI preserving changes to HALs during Android Q
da33234403ff5d60f3473711917b9948e6484a4260b5247acdafb111193a9de2 android.hardware.configstore@1.0::ISurfaceFlingerConfigs
574e8f1499436fb4075894dcae0b36682427956ecb114f17f1fe22d116a83c6b android.hardware.neuralnetworks@1.0::IPreparedModel
1a5ae9793223658174258b523763c557abad6fb917df0b8e3cc097fc89035811 android.hardware.neuralnetworks@1.0::types
4310eb8272f085914952f3bfb73a8f8bb477a80e8b93596f0ea5acb58546b66d android.hardware.neuralnetworks@1.1::types
1fb32361286b938d48a55c2539c846732afce0b99fe08590f556643125bc13d3 android.hardware.neuralnetworks@1.0::types
e22e8135d061d0e9c4c1a70c25c19fdba10f4d3cda9795ef25b6392fc520317c android.hardware.neuralnetworks@1.1::types
810b03825c633b21982871a8aa690db94285947fca71881de71bf293ad0aa9c5 android.hardware.neuralnetworks@1.2::types
79f3820a02f37bb0f84bca1a07900fd5bd819ec5a60ed14b205e1dc5e24a51b2 android.hardware.neuralnetworks@1.2::IDevice
1d4a5776614c08b5d794a5ec5ab04697260cbd4b3441d5935cd53ee71d19da02 android.hardware.radio@1.0::IRadioResponse
271187e261b30c01a33011aea257c07a2d2f05b72943ebee89e973e997849973 android.hardware.radio@1.0::types
1d19720d4fd38b1095f0f555a4bd92b3b12c9b1d0f560b0e9a474cd6dcc20db6 android.hardware.radio@1.2::IRadio
+78 −16
Original line number Diff line number Diff line
@@ -68,6 +68,7 @@ enum OperandType : int32_t {
 * The type of an operation in a model.
 */
enum OperationType : int32_t {

    /**
     * Adds two tensors, element-wise.
     *
@@ -105,6 +106,8 @@ enum OperationType : int32_t {
     *
     * Outputs:
     * * 0: The sum, a tensor of the same {@link OperandType} as input0.
     *
     * Available since API level 27.
     */
    ADD = 0,

@@ -116,8 +119,10 @@ enum OperationType : int32_t {
     *
     * The values in the output tensor are computed as:
     *
     *     output[batch, row, col, channel] =
     *         sum_{i, j}(input[batch, row + i, col + j, channel]) / sum(1)
     *     output[b, i, j, channel] =
     *         sum_{di, dj}(
     *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
     *         ) / sum(1)
     *
     * Supported tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
@@ -171,7 +176,9 @@ enum OperationType : int32_t {
     *
     * Outputs:
     * * 0: The output 4-D tensor, of shape
            [batches, out_height, out_width, depth].
     *      [batches, out_height, out_width, depth].
     *
     * Available since API level 27.
     */
    AVERAGE_POOL_2D = 1,

@@ -198,6 +205,8 @@ enum OperationType : int32_t {
     * Outputs:
     * * 0: The output, a tensor of the same {@link OperandType} as the input
     *      tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
     *
     * Available since API level 27.
     */
    CONCATENATION = 2,

@@ -213,12 +222,11 @@ enum OperationType : int32_t {
     *
     * The values in the output tensor are computed as:
     *
     *     output[batch, row, col, channel] =
     *         sum_{i, j} (
     *             input[batch, row + i, col + j, k] *
     *             filter[channel, row + i, col + j, k] +
     *             bias[channel]
     *         )
     *     output[b, i, j, channel] =
     *         sum_{di, dj, k} (
     *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
     *             filter[channel, di, dj, k]
     *         ) + bias[channel]
     *
     * Supported tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
@@ -284,6 +292,8 @@ enum OperationType : int32_t {
     *      [batches, out_height, out_width, depth_out]. For output tensor of
     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
     *      must be satisfied: output_scale > input_scale * filter_scale.
     *
     * Available since API level 27.
     */
    CONV_2D = 3,

@@ -307,7 +317,7 @@ enum OperationType : int32_t {
     *         sum_{di, dj} (
     *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
     *             filter[1, di, dj, k * channel_multiplier + q]
     *         )
     *         ) + bias[k * channel_multiplier + q]
     *
     * Supported tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
@@ -375,6 +385,8 @@ enum OperationType : int32_t {
     *      [batches, out_height, out_width, depth_out]. For output tensor of
     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
     *      must be satisfied: output_scale > input_scale * filter_scale.
     *
     * Available since API level 27.
     */
    DEPTHWISE_CONV_2D = 4,

@@ -409,6 +421,8 @@ enum OperationType : int32_t {
     * Outputs:
     * * 0: The output 4-D tensor, of shape [batch, height*block_size,
     *      width*block_size, depth/(block_size*block_size)].
     *
     * Available since API level 27.
     */
    DEPTH_TO_SPACE = 5,

@@ -430,6 +444,8 @@ enum OperationType : int32_t {
     * Outputs:
     * * 0: The output tensor of same shape as input0, but with
     *      {@link OperandType::TENSOR_FLOAT32}.
     *
     * Available since API level 27.
     */
    DEQUANTIZE = 6,

@@ -463,6 +479,8 @@ enum OperationType : int32_t {
     * * 0: An n-D tensor with the same rank and shape as the Values
     *      tensor, except for the first dimension which has the same size
     *      as Lookups' only dimension.
     *
     * Available since API level 27.
     */
    EMBEDDING_LOOKUP = 7,

@@ -480,6 +498,8 @@ enum OperationType : int32_t {
     * Outputs:
     * * 0: The output tensor, of the same {@link OperandType} and dimensions as
     *      the input tensor.
     *
     * Available since API level 27.
     */
    FLOOR = 8,

@@ -523,6 +543,8 @@ enum OperationType : int32_t {
     *      tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
     *      condition must be satisfied:
     *      output_scale > input_scale * filter_scale.
     *
     * Available since API level 27.
     */
    FULLY_CONNECTED = 9,

@@ -571,6 +593,8 @@ enum OperationType : int32_t {
     *      Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0
     *      and scale 1.0f.
     *      A non-zero byte represents True, a hit. A zero indicates otherwise.
     *
     * Available since API level 27.
     */
    HASHTABLE_LOOKUP = 10,

@@ -598,6 +622,8 @@ enum OperationType : int32_t {
     * Outputs:
     * * 0: The output 4-D tensor, of the same shape as input
     *      [batches, height, width, depth].
     *
     * Available since API level 27.
     */
    L2_NORMALIZATION = 11,

@@ -609,8 +635,8 @@ enum OperationType : int32_t {
     *
     * The values in the output tensor are computed as:
     *
     *     output[batch, row, col, channel] =
     *         sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) /
     *     output[b, i, j, c] =
     *         sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) /
     *              sum(1))
     *
     * Supported tensor {@link OperandType}:
@@ -664,6 +690,8 @@ enum OperationType : int32_t {
     * Outputs:
     * * 0: The output 4-D tensor, of shape
     *      [batches, out_height, out_width, depth].
     *
     * Available since API level 27.
     */
    L2_POOL_2D = 12,

@@ -700,6 +728,8 @@ enum OperationType : int32_t {
     *
     * Outputs:
     * * 0: The output tensor of same shape as input0.
     *
     * Available since API level 27.
     */
    LOCAL_RESPONSE_NORMALIZATION = 13,

@@ -723,6 +753,8 @@ enum OperationType : int32_t {
     * * 0: The output tensor of same shape as input0.
     *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the scale must be 1.f / 256 and the zeroPoint must be 0.
     *
     * Available since API level 27.
     */
    LOGISTIC = 14,

@@ -758,6 +790,8 @@ enum OperationType : int32_t {
     *      If the projection type is Dense:
     *        Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
     *        A flattened tensor that represents projected bit vectors.
     *
     * Available since API level 27.
     */
    LSH_PROJECTION = 15,

@@ -952,6 +986,8 @@ enum OperationType : int32_t {
     *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
     *      [batch_size, output_size]. This is effectively the same as the
     *      current “output state (out)” value.
     *
     * Available since API level 27.
     */
    LSTM = 16,

@@ -963,8 +999,10 @@ enum OperationType : int32_t {
     *
     * The values in the output tensor are computed as:
     *
     *     output[batch, row, col, channel] =
     *         max_{i, j} (input[batch, row + i, col + j, channel])
     *     output[b, i, j, channel] =
     *         max_{di, dj} (
     *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
     *         )
     *
     * Supported tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
@@ -1018,6 +1056,8 @@ enum OperationType : int32_t {
     * Outputs:
     * * 0: The output 4-D tensor, of shape
     *      [batches, out_height, out_width, depth].
     *
     * Available since API level 27.
     */
    MAX_POOL_2D = 17,

@@ -1055,6 +1095,8 @@ enum OperationType : int32_t {
     *      For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the following condition must be satisfied:
     *      output_scale > input1_scale * input2_scale.
     *
     * Available since API level 27.
     */
    MUL = 18,

@@ -1076,6 +1118,8 @@ enum OperationType : int32_t {
     *
     * Outputs:
     * * 0: The output tensor of same shape as input0.
     *
     * Available since API level 27.
     */
    RELU = 19,

@@ -1097,6 +1141,8 @@ enum OperationType : int32_t {
     *
     * Outputs:
     * * 0: The output tensor of same shape as input0.
     *
     * Available since API level 27.
     */
    RELU1 = 20,

@@ -1118,6 +1164,8 @@ enum OperationType : int32_t {
     *
     * Outputs:
     * * 0: The output tensor of same shape as input0.
     *
     * Available since API level 27.
     */
    RELU6 = 21,

@@ -1141,6 +1189,8 @@ enum OperationType : int32_t {
     *
     * Outputs:
     * * 0: The output tensor, of shape specified by the input shape.
     *
     * Available since API level 27.
     */
    RESHAPE = 22,

@@ -1167,6 +1217,8 @@ enum OperationType : int32_t {
     * Outputs:
     * * 0: The output 4-D tensor, of shape
     *      [batches, new_height, new_width, depth].
     *
     * Available since API level 27.
     */
    RESIZE_BILINEAR = 23,

@@ -1222,6 +1274,8 @@ enum OperationType : int32_t {
     *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
     *      [batch_size, num_units]. This is effectively the same as the
     *      current state value.
     *
     * Available since API level 27.
     */
    RNN = 24,

@@ -1251,6 +1305,8 @@ enum OperationType : int32_t {
     * * 0: The output tensor of same shape as input0.
     *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
     *      the scale must be 1.f / 256 and the zeroPoint must be 0.
     *
     * Available since API level 27.
     */
    SOFTMAX = 25,

@@ -1284,6 +1340,8 @@ enum OperationType : int32_t {
     * Outputs:
     * * 0: The output 4-D tensor, of shape [batches, height/block_size,
     *      width/block_size, depth_in*block_size*block_size].
     *
     * Available since API level 27.
     */
    SPACE_TO_DEPTH = 26,

@@ -1362,6 +1420,8 @@ enum OperationType : int32_t {
     * * 1: output.
     *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
     *      [batch_size, num_units].
     *
     * Available since API level 27.
     */
    SVDF = 27,

@@ -1382,6 +1442,8 @@ enum OperationType : int32_t {
     *
     * Outputs:
     * * 0: The output tensor of same shape as input0.
     *
     * Available since API level 27.
     */
    TANH = 28,

+38 −16
Original line number Diff line number Diff line
@@ -26,6 +26,7 @@ import @1.0::PerformanceInfo;
 * The type of an operation in a model.
 */
enum OperationType : @1.0::OperationType {

    /**
     * BatchToSpace for N-dimensional tensors.
     *
@@ -50,6 +51,8 @@ enum OperationType : @1.0::OperationType {
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0.
     *
     * Available since API level 28.
     */
    BATCH_TO_SPACE_ND = 29,

@@ -88,6 +91,8 @@ enum OperationType : @1.0::OperationType {
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0.
     *
     * Available since API level 28.
     */
    DIV = 30,

@@ -118,6 +123,8 @@ enum OperationType : @1.0::OperationType {
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0.
     *
     * Available since API level 28.
     */
    MEAN = 31,

@@ -150,6 +157,8 @@ enum OperationType : @1.0::OperationType {
     *      of the padding:
     *          output0.dimension[i] =
     *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
     *
     * Available since API level 28.
     */
    PAD = 32,

@@ -185,6 +194,8 @@ enum OperationType : @1.0::OperationType {
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0.
     *
     * Available since API level 28.
     */
    SPACE_TO_BATCH_ND = 33,

@@ -214,6 +225,8 @@ enum OperationType : @1.0::OperationType {
     * * 0: A tensor of the same {@link OperandType} as input0. Contains the
     *      same data as input, but has one or more dimensions of size 1
     *      removed.
     *
     * Available since API level 28.
     */
    SQUEEZE = 34,

@@ -234,28 +247,32 @@ enum OperationType : @1.0::OperationType {
     *
     * Inputs:
     * * 0: An n-D tensor, specifying the tensor to be sliced.
     * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the starts of
     *      the dimensions of the input tensor to be sliced. The length must be
     *      of rank(input0).
     * * 2: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the ends of
     *      the dimensions of the input tensor to be sliced. The length must be
     *      of rank(input0).
     * * 3: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the strides of
     *      the dimensions of the input tensor to be sliced. The length must be
     *      of rank(input0).
     * * 4: An {@link OperandType::INT32} scalar, begin_mask. If the ith bit
     * * 1: begin, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
     *      starts of the dimensions of the input tensor to be sliced. The
     *      length must be of rank(input0).
     * * 2: end, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
     *      ends of the dimensions of the input tensor to be sliced. The length
     *      must be of rank(input0).
     * * 3: strides, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
     *      strides of the dimensions of the input tensor to be sliced. The
     *      length must be of rank(input0). The entries must be non-zero.
     * * 4: begin_mask, an {@link OperandType::INT32} scalar. If the ith bit
     *      of begin_mask is set, begin[i] is ignored and the fullest possible
     *      range in that dimension is used instead.
     * * 5: An {@link OperandType::INT32} scalar, end_mask. If the ith bit of
     * * 5: end_mask, an {@link OperandType::INT32} scalar. If the ith bit of
     *      end_mask is set, end[i] is ignored and the fullest possible range in
     *      that dimension is used instead.
     * * 6: An {@link OperandType::INT32} scalar, shrink_axis_mask. An int32
     *      mask. If the ith bit of shrink_axis_mask is set, it implies that the
     *      ith specification shrinks the dimensionality by 1. A slice of size 1
     *      starting from begin[i] in the dimension must be preserved.
     * * 6: shrink_axis_mask, an {@link OperandType::INT32} scalar. If the
     *      ith bit of shrink_axis_mask is set, the ith dimension specification
     *      shrinks the dimensionality by 1, taking on the value at index
     *      begin[i]. In this case, the ith specification must define a
     *      slice of size 1, e.g. begin[i] = x, end[i] = x + 1.
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0.
     * * 0: A tensor of the same {@link OperandType} as input0 and rank (n - k),
     *      where k is the number of bits set in shrink_axis_mask.
     *
     * Available since API level 28.
     */
    STRIDED_SLICE = 35,

@@ -294,6 +311,8 @@ enum OperationType : @1.0::OperationType {
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0.
     *
     * Available since API level 28.
     */
    SUB = 36,

@@ -319,8 +338,11 @@ enum OperationType : @1.0::OperationType {
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0.
     *
     * Available since API level 28.
     */
    TRANSPOSE = 37,

};

/**
+1 −0
Original line number Diff line number Diff line
@@ -26,6 +26,7 @@ import @1.1::OperationType;
 * The type of an operation in a model.
 */
enum OperationType : @1.1::OperationType {

};

/**