Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8882e307 authored by Andy Hung's avatar Andy Hung Committed by Android (Google) Code Review
Browse files

Merge "TypeConverter: proper conversion of invalid values to strings"

parents 63c0f36d 9b181958
Loading
Loading
Loading
Loading
+0 −8
Original line number Diff line number Diff line
@@ -415,14 +415,6 @@ bool deviceFromString(const std::string& literalDevice, audio_devices_t& device)
            OutputDeviceConverter::fromString(literalDevice, device);
}

// Converts a single audio_devices_t value to its string literal.
// Input devices (AUDIO_DEVICE_BIT_IN set) and output devices are kept in
// separate converter tables, so dispatch on that bit before converting.
// Returns false when the device has no table entry.
bool deviceToString(audio_devices_t device, std::string& literalDevice) {
    const bool isInput = (device & AUDIO_DEVICE_BIT_IN) != 0;
    return isInput
            ? InputDeviceConverter::toString(device, literalDevice)
            : OutputDeviceConverter::toString(device, literalDevice);
}

SampleRateTraits::Collection samplingRatesFromString(
        const std::string &samplingRates, const char *del)
{
+34 −40
Original line number Diff line number Diff line
@@ -233,8 +233,6 @@ template<> const AudioFlagConverter::Table AudioFlagConverter::mTable[];

bool deviceFromString(const std::string& literalDevice, audio_devices_t& device);

bool deviceToString(audio_devices_t device, std::string& literalDevice);

SampleRateTraits::Collection samplingRatesFromString(
        const std::string &samplingRates, const char *del = AudioParameter::valueListSeparator);

@@ -255,44 +253,50 @@ InputChannelTraits::Collection inputChannelMasksFromString(
OutputChannelTraits::Collection outputChannelMasksFromString(
        const std::string &outChannels, const char *del = AudioParameter::valueListSeparator);

static inline std::string toString(audio_usage_t usage)
// counting enumerations
template <typename T, std::enable_if_t<std::is_same<T, audio_content_type_t>::value
                                    || std::is_same<T, audio_mode_t>::value
                                    || std::is_same<T, audio_source_t>::value
                                    || std::is_same<T, audio_stream_type_t>::value
                                    || std::is_same<T, audio_usage_t>::value
                                    , int> = 0>
static inline std::string toString(const T& value)
{
    std::string usageLiteral;
    if (!android::UsageTypeConverter::toString(usage, usageLiteral)) {
        ALOGV("failed to convert usage: %d", usage);
        return "AUDIO_USAGE_UNKNOWN";
    }
    return usageLiteral;
    std::string result;
    return TypeConverter<DefaultTraits<T>>::toString(value, result)
            ? result : std::to_string(static_cast<int>(value));

}

static inline std::string toString(audio_content_type_t content)
// flag enumerations
template <typename T, std::enable_if_t<std::is_same<T, audio_gain_mode_t>::value
                                    || std::is_same<T, audio_input_flags_t>::value
                                    || std::is_same<T, audio_output_flags_t>::value
                                    , int> = 0>
static inline std::string toString(const T& value)
{
    std::string contentLiteral;
    if (!android::AudioContentTypeConverter::toString(content, contentLiteral)) {
        ALOGV("failed to convert content type: %d", content);
        return "AUDIO_CONTENT_TYPE_UNKNOWN";
    }
    return contentLiteral;
    std::string result;
    TypeConverter<DefaultTraits<T>>::maskToString(value, result);
    return result;
}

static inline std::string toString(audio_stream_type_t stream)
static inline std::string toString(const audio_devices_t& devices)
{
    std::string streamLiteral;
    if (!android::StreamTypeConverter::toString(stream, streamLiteral)) {
        ALOGV("failed to convert stream: %d", stream);
        return "AUDIO_STREAM_DEFAULT";
    std::string result;
    if ((devices & AUDIO_DEVICE_BIT_IN) != 0) {
        InputDeviceConverter::maskToString(devices, result);
    } else {
        OutputDeviceConverter::maskToString(devices, result);
    }
    return streamLiteral;
    return result;
}

static inline std::string toString(audio_source_t source)
// TODO: Remove when FormatTraits uses DefaultTraits.
static inline std::string toString(const audio_format_t& format)
{
    std::string sourceLiteral;
    if (!android::SourceTypeConverter::toString(source, sourceLiteral)) {
        ALOGV("failed to convert source: %d", source);
        return "AUDIO_SOURCE_DEFAULT";
    }
    return sourceLiteral;
    std::string result;
    return TypeConverter<VectorTraits<audio_format_t>>::toString(format, result)
            ? result : std::to_string(static_cast<int>(format));
}

static inline std::string toString(const audio_attributes_t& attributes)
@@ -308,16 +312,6 @@ static inline std::string toString(const audio_attributes_t &attributes)
    return result.str();
}

// Converts an audio_mode_t to its enum-name string.
// Falls back to "AUDIO_MODE_INVALID" (logging at verbose level) when the
// converter table has no entry for the given mode.
static inline std::string toString(audio_mode_t mode)
{
    std::string modeLiteral;
    const bool converted = android::AudioModeConverter::toString(mode, modeLiteral);
    if (converted) {
        return modeLiteral;
    }
    ALOGV("failed to convert mode: %d", mode);
    return "AUDIO_MODE_INVALID";
}

}; // namespace android

#endif  /*ANDROID_TYPE_CONVERTER_H_*/
+9 −55
Original line number Diff line number Diff line
@@ -457,52 +457,6 @@ const char *AudioFlinger::ThreadBase::threadTypeToString(AudioFlinger::ThreadBas
    }
}

// Renders a device bitmask as a human-readable list of device names.
// Input and output devices live in separate converter tables, selected
// by the AUDIO_DEVICE_BIT_IN flag.
std::string devicesToString(audio_devices_t devices)
{
    std::string names;
    if ((devices & AUDIO_DEVICE_BIT_IN) == 0) {
        OutputDeviceConverter::maskToString(devices, names);
    } else {
        InputDeviceConverter::maskToString(devices, names);
    }
    return names;
}

// Renders an input-flags bitmask as a human-readable flag-name list.
std::string inputFlagsToString(audio_input_flags_t flags)
{
    std::string literal;
    InputFlagConverter::maskToString(flags, literal);
    return literal;
}

// Renders an output-flags bitmask as a human-readable flag-name list.
std::string outputFlagsToString(audio_output_flags_t flags)
{
    std::string literal;
    OutputFlagConverter::maskToString(flags, literal);
    return literal;
}

// Maps an audio_source_t to a short human-readable label for dump output.
// Unrecognized sources yield "unknown". Table-driven so the mapping reads
// as data; all returned strings have static storage duration.
const char *sourceToString(audio_source_t source)
{
    static const struct {
        audio_source_t source;
        const char *name;
    } kSourceNames[] = {
        { AUDIO_SOURCE_DEFAULT,             "default" },
        { AUDIO_SOURCE_MIC,                 "mic" },
        { AUDIO_SOURCE_VOICE_UPLINK,        "voice uplink" },
        { AUDIO_SOURCE_VOICE_DOWNLINK,      "voice downlink" },
        { AUDIO_SOURCE_VOICE_CALL,          "voice call" },
        { AUDIO_SOURCE_CAMCORDER,           "camcorder" },
        { AUDIO_SOURCE_VOICE_RECOGNITION,   "voice recognition" },
        { AUDIO_SOURCE_VOICE_COMMUNICATION, "voice communication" },
        { AUDIO_SOURCE_REMOTE_SUBMIX,       "remote submix" },
        { AUDIO_SOURCE_UNPROCESSED,         "unprocessed" },
        { AUDIO_SOURCE_VOICE_PERFORMANCE,   "voice performance" },
        { AUDIO_SOURCE_ECHO_REFERENCE,      "echo reference" },
        { AUDIO_SOURCE_FM_TUNER,            "FM tuner" },
        { AUDIO_SOURCE_HOTWORD,             "hotword" },
    };
    for (const auto& entry : kSourceNames) {
        if (entry.source == source) {
            return entry.name;
        }
    }
    return "unknown";
}

AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
        audio_devices_t outDevice, audio_devices_t inDevice, type_t type, bool systemReady)
    :   Thread(false /*canCallJava*/),
@@ -717,8 +671,8 @@ void AudioFlinger::ThreadBase::processConfigEvents_l()
            event->mStatus = createAudioPatch_l(&data->mPatch, &data->mHandle);
            const audio_devices_t newDevice = getDevice();
            mLocalLog.log("CFG_EVENT_CREATE_AUDIO_PATCH: old device %#x (%s) new device %#x (%s)",
                    (unsigned)oldDevice, devicesToString(oldDevice).c_str(),
                    (unsigned)newDevice, devicesToString(newDevice).c_str());
                    (unsigned)oldDevice, toString(oldDevice).c_str(),
                    (unsigned)newDevice, toString(newDevice).c_str());
        } break;
        case CFG_EVENT_RELEASE_AUDIO_PATCH: {
            const audio_devices_t oldDevice = getDevice();
@@ -727,8 +681,8 @@ void AudioFlinger::ThreadBase::processConfigEvents_l()
            event->mStatus = releaseAudioPatch_l(data->mHandle);
            const audio_devices_t newDevice = getDevice();
            mLocalLog.log("CFG_EVENT_RELEASE_AUDIO_PATCH: old device %#x (%s) new device %#x (%s)",
                    (unsigned)oldDevice, devicesToString(oldDevice).c_str(),
                    (unsigned)newDevice, devicesToString(newDevice).c_str());
                    (unsigned)oldDevice, toString(oldDevice).c_str(),
                    (unsigned)newDevice, toString(newDevice).c_str());
        } break;
        default:
            ALOG_ASSERT(false, "processConfigEvents_l() unknown event type %d", event->mType);
@@ -858,9 +812,9 @@ void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args __u
        dprintf(fd, " none\n");
    }
    // Note: output device may be used by capture threads for effects such as AEC.
    dprintf(fd, "  Output device: %#x (%s)\n", mOutDevice, devicesToString(mOutDevice).c_str());
    dprintf(fd, "  Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).c_str());
    dprintf(fd, "  Audio source: %d (%s)\n", mAudioSource, sourceToString(mAudioSource));
    dprintf(fd, "  Output device: %#x (%s)\n", mOutDevice, toString(mOutDevice).c_str());
    dprintf(fd, "  Input device: %#x (%s)\n", mInDevice, toString(mInDevice).c_str());
    dprintf(fd, "  Audio source: %d (%s)\n", mAudioSource, toString(mAudioSource).c_str());

    // Dump timestamp statistics for the Thread types that support it.
    if (mType == RECORD
@@ -1885,7 +1839,7 @@ void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>&
    AudioStreamOut *output = mOutput;
    audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
    dprintf(fd, "  AudioStreamOut: %p flags %#x (%s)\n",
            output, flags, outputFlagsToString(flags).c_str());
            output, flags, toString(flags).c_str());
    dprintf(fd, "  Frames written: %lld\n", (long long)mFramesWritten);
    dprintf(fd, "  Suspended frames: %lld\n", (long long)mSuspendedFrames);
    if (mPipeSink.get() != nullptr) {
@@ -7769,7 +7723,7 @@ void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& a
    AudioStreamIn *input = mInput;
    audio_input_flags_t flags = input != NULL ? input->flags : AUDIO_INPUT_FLAG_NONE;
    dprintf(fd, "  AudioStreamIn: %p flags %#x (%s)\n",
            input, flags, inputFlagsToString(flags).c_str());
            input, flags, toString(flags).c_str());
    dprintf(fd, "  Frames read: %lld\n", (long long)mFramesRead);
    if (mActiveTracks.isEmpty()) {
        dprintf(fd, "  No active record clients\n");
+1 −3
Original line number Diff line number Diff line
@@ -41,9 +41,7 @@ static void dumpPatchEndpoints(
        const audio_port_config &cfg = cfgs[i];
        dst->appendFormat("%*s  [%s %d] ", spaces, "", prefix, i + 1);
        if (cfg.type == AUDIO_PORT_TYPE_DEVICE) {
            std::string device;
            deviceToString(cfg.ext.device.type, device);
            dst->appendFormat("Device ID %d %s", cfg.id, device.c_str());
            dst->appendFormat("Device ID %d %s", cfg.id, toString(cfg.ext.device.type).c_str());
        } else {
            dst->appendFormat("Mix ID %d I/O handle %d", cfg.id, cfg.ext.mix.handle);
        }
+1 −3
Original line number Diff line number Diff line
@@ -66,9 +66,7 @@ void AudioPolicyMix::dump(String8 *dst, int spaces, int index) const
    RouteFlagTypeConverter::maskToString(mMix.mRouteFlags, routeFlagLiteral);
    dst->appendFormat("%*s- Route Flags: %s\n", spaces, "", routeFlagLiteral.c_str());

    std::string deviceLiteral;
    deviceToString(mMix.mDeviceType, deviceLiteral);
    dst->appendFormat("%*s- device type: %s\n", spaces, "", deviceLiteral.c_str());
    dst->appendFormat("%*s- device type: %s\n", spaces, "", toString(mMix.mDeviceType).c_str());

    dst->appendFormat("%*s- device address: %s\n", spaces, "", mMix.mDeviceAddress.string());

Loading