Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bd4e7897 authored by TreeHugger Robot's avatar TreeHugger Robot Committed by Android (Google) Code Review
Browse files

Merge "le_audio: Use set scenarios for multiple context types" into tm-qpr-dev

parents 7110f0bc f07ca0bf
Loading
Loading
Loading
Loading
+0 −51
Original line number Diff line number Diff line
@@ -5,44 +5,6 @@
        "  configurations, listed in the order of priority."
    ],
    "scenarios": [
        {
            "name": "Ringtone",
            "configurations": [
                "DualDev_OneChanStereoSnk_OneChanStereoSrc_32_2_Server_Preferred",
                "DualDev_OneChanStereoSnk_OneChanStereoSrc_32_2_1",
                "DualDev_OneChanStereoSnk_OneChanMonoSrc_32_2_Server_Preferred",
                "DualDev_OneChanStereoSnk_OneChanMonoSrc_32_2_1",
                "DualDev_OneChanDoubleStereoSnk_OneChanMonoSrc_32_2_Server_Preferred",
                "DualDev_OneChanDoubleStereoSnk_OneChanMonoSrc_32_2_1",
                "SingleDev_TwoChanStereoSnk_TwoChanStereoSrc_32_2_Server_Preferred",
                "SingleDev_TwoChanStereoSnk_TwoChanStereoSrc_32_2_1",
                "SingleDev_TwoChanStereoSnk_OneChanMonoSrc_32_2_Server_Preferred",
                "SingleDev_TwoChanStereoSnk_OneChanMonoSrc_32_2_1",
                "SingleDev_OneChanStereoSnk_OneChanMonoSrc_32_2_Server_Preferred",
                "SingleDev_OneChanStereoSnk_OneChanMonoSrc_32_2_1",
                "SingleDev_OneChanMonoSnk_OneChanMonoSrc_32_2_Server_Preferred",
                "SingleDev_OneChanMonoSnk_OneChanMonoSrc_32_2_1",
                "DualDev_OneChanStereoSnk_16_2_Server_Preferred",
                "DualDev_OneChanStereoSnk_16_2_1",
                "DualDev_OneChanStereoSnk_16_1_Server_Preferred",
                "DualDev_OneChanStereoSnk_16_1_1",
                "SingleDev_OneChanStereoSnk_16_2_Server_Preferred",
                "SingleDev_OneChanStereoSnk_16_2_1",
                "SingleDev_OneChanStereoSnk_16_1_Server_Preferred",
                "SingleDev_OneChanStereoSnk_16_1_1",
                "SingleDev_TwoChanStereoSnk_16_2_Server_Preferred",
                "SingleDev_TwoChanStereoSnk_16_2_1",
                "SingleDev_TwoChanStereoSnk_16_1_Server_Preferred",
                "SingleDev_TwoChanStereoSnk_16_1_1",
                "SingleDev_OneChanMonoSnk_16_2_Server_Preferred",
                "SingleDev_OneChanMonoSnk_16_2_1",
                "SingleDev_OneChanMonoSnk_16_1_Server_Preferred",
                "SingleDev_OneChanMonoSnk_16_1_1",
                "DualDev_OneChanMonoSrc_16_2_Server_Preferred",
                "SingleDev_OneChanStereoSrc_16_2_Server_Preferred",
                "SingleDev_OneChanMonoSrc_16_2_Server_Preferred"
            ]
        },
        {
            "name": "Conversational",
            "configurations": [
@@ -251,19 +213,6 @@
                "VND_SingleDev_TwoChanStereoSrc_48khz_100octs_Server_Preferred_1",
                "VND_SingleDev_TwoChanStereoSrc_48khz_100octs_R11_L40_1"
            ]
        },
        {
            "name": "Default",
            "configurations": [
                "DualDev_OneChanStereoSnk_16_2_Server_Preferred",
                "DualDev_OneChanStereoSnk_16_2_1",
                "SingleDev_OneChanStereoSnk_16_2_Server_Preferred",
                "SingleDev_OneChanStereoSnk_16_2_1",
                "SingleDev_TwoChanStereoSnk_16_2_Server_Preferred",
                "SingleDev_TwoChanStereoSnk_16_2_1",
                "SingleDev_OneChanMonoSnk_16_2_Server_Preferred",
                "SingleDev_OneChanMonoSnk_16_2_1"
            ]
        }
    ]
}
+5 −6
Original line number Diff line number Diff line
@@ -199,12 +199,6 @@ bool IsLc3SettingSupported(LeAudioContextType context_type, Lc3SettingId id) {
  /* Update those values, on any change of codec linked with content type */
  switch (context_type) {
    case LeAudioContextType::RINGTONE:
      if (id == Lc3SettingId::LC3_16_2 || id == Lc3SettingId::LC3_16_1 ||
          id == Lc3SettingId::LC3_32_2)
        return true;

      break;

    case LeAudioContextType::CONVERSATIONAL:
      if (id == Lc3SettingId::LC3_16_1 || id == Lc3SettingId::LC3_16_2 ||
          id == Lc3SettingId::LC3_24_1 || id == Lc3SettingId::LC3_24_2 ||
@@ -217,6 +211,11 @@ bool IsLc3SettingSupported(LeAudioContextType context_type, Lc3SettingId id) {
      break;

    case LeAudioContextType::MEDIA:
    case LeAudioContextType::ALERTS:
    case LeAudioContextType::INSTRUCTIONAL:
    case LeAudioContextType::NOTIFICATIONS:
    case LeAudioContextType::EMERGENCYALARM:
    case LeAudioContextType::UNSPECIFIED:
      if (id == Lc3SettingId::LC3_16_1 || id == Lc3SettingId::LC3_16_2 ||
          id == Lc3SettingId::LC3_32_1 || id == Lc3SettingId::LC3_32_2 ||
          id == Lc3SettingId::LC3_48_4 || id == Lc3SettingId::LC3_48_2 ||
+23 −4
Original line number Diff line number Diff line
@@ -1493,7 +1493,7 @@ class UnicastTestNoInit : public Test {
    if (reconfigure_existing_stream) {
      EXPECT_CALL(*mock_unicast_audio_source_, SuspendedForReconfiguration())
          .Times(1);
      EXPECT_CALL(*mock_unicast_audio_source_, ConfirmStreamingRequest())
      EXPECT_CALL(*mock_unicast_audio_source_, CancelStreamingRequest())
          .Times(1);
    } else {
      EXPECT_CALL(*mock_unicast_audio_source_, SuspendedForReconfiguration())
@@ -3152,7 +3152,7 @@ TEST_F(UnicastTest, TwoEarbudsStreaming) {
  Mock::VerifyAndClearExpectations(mock_audio_sink_);
}

TEST_F(UnicastTest, TwoEarbudsStreamingContextSwitchSimple) {
TEST_F(UnicastTest, TwoEarbudsStreamingContextSwitchNoReconfigure) {
  uint8_t group_size = 2;
  int group_id = 2;

@@ -3218,6 +3218,20 @@ TEST_F(UnicastTest, TwoEarbudsStreamingContextSwitchSimple) {
      StartStream(_, le_audio::types::LeAudioContextType::EMERGENCYALARM, _, _))
      .Times(1);
  UpdateMetadata(AUDIO_USAGE_EMERGENCY, AUDIO_CONTENT_TYPE_UNKNOWN);
  Mock::VerifyAndClearExpectations(&mock_client_callbacks_);
  Mock::VerifyAndClearExpectations(mock_unicast_audio_source_);

  // Do a content switch to INSTRUCTIONAL
  EXPECT_CALL(*mock_unicast_audio_source_, Release).Times(0);
  EXPECT_CALL(*mock_unicast_audio_source_, Stop).Times(0);
  EXPECT_CALL(*mock_unicast_audio_source_, Start).Times(0);
  EXPECT_CALL(
      mock_state_machine_,
      StartStream(_, le_audio::types::LeAudioContextType::INSTRUCTIONAL, _, _))
      .Times(1);
  UpdateMetadata(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
                 AUDIO_CONTENT_TYPE_UNKNOWN);
  Mock::VerifyAndClearExpectations(&mock_client_callbacks_);
  Mock::VerifyAndClearExpectations(mock_unicast_audio_source_);
}

@@ -3593,8 +3607,13 @@ TEST_F(UnicastTest, StartNotSupportedContextType) {
  // Verify Data transfer on one audio source cis
  TestAudioDataTransfer(group_id, cis_count_out, cis_count_in, 1920);

  EXPECT_CALL(mock_state_machine_, StopStream(_)).Times(0);
  UpdateMetadata(AUDIO_USAGE_GAME, AUDIO_CONTENT_TYPE_UNKNOWN);
  // Fallback scenario now supports 48Khz just like Media so we will reconfigure
  EXPECT_CALL(mock_state_machine_, StopStream(_)).Times(1);
  UpdateMetadata(AUDIO_USAGE_GAME, AUDIO_CONTENT_TYPE_UNKNOWN, true);

  /* The above will trigger reconfiguration. After that Audio Hal action
   * is needed to restart the stream */
  SinkAudioResume();
}
}  // namespace
}  // namespace le_audio
+70 −43
Original line number Diff line number Diff line
@@ -25,6 +25,7 @@
#include "flatbuffers/util.h"
#include "le_audio_set_configuration_provider.h"
#include "osi/include/log.h"
#include "osi/include/osi.h"

using le_audio::set_configurations::AudioSetConfiguration;
using le_audio::set_configurations::AudioSetConfigurations;
@@ -64,11 +65,67 @@ static const std::vector<

/** Provides a set configurations for the given context type */
struct AudioSetConfigurationProviderJson {
  static constexpr auto kDefaultScenario = "Media";

  /* Loads the LE Audio set-configuration and set-scenario flatbuffer content
   * at construction time. Aborts via ASSERT_LOG when loading fails, since the
   * provider cannot serve any configurations without these files. */
  AudioSetConfigurationProviderJson() {
    ASSERT_LOG(LoadContent(kLeAudioSetConfigs, kLeAudioSetScenarios),
               ": Unable to load le audio set configuration files.");
  }

  /* Use the same scenario configurations for different contexts to avoid
   * internal reconfiguration and handover that produces time gap. When using
   * the same scenario for different contexts, quality and configuration remains
   * the same while changing to same scenario based context type.
   */
  static auto ScenarioToContextTypes(const std::string& scenario) {
    static const std::multimap<std::string,
                               ::le_audio::types::LeAudioContextType>
        scenarios = {
            {"Media", types::LeAudioContextType::ALERTS},
            {"Media", types::LeAudioContextType::INSTRUCTIONAL},
            {"Media", types::LeAudioContextType::NOTIFICATIONS},
            {"Media", types::LeAudioContextType::EMERGENCYALARM},
            {"Media", types::LeAudioContextType::UNSPECIFIED},
            {"Media", types::LeAudioContextType::MEDIA},
            {"Conversational", types::LeAudioContextType::RINGTONE},
            {"Conversational", types::LeAudioContextType::CONVERSATIONAL},
            {"Recording", types::LeAudioContextType::LIVE},
            {"Game", types::LeAudioContextType::GAME},
            {"VoiceAssistants", types::LeAudioContextType::VOICEASSISTANTS},
        };
    return scenarios.equal_range(scenario);
  }

  /* Maps a context type to the name of the scenario configured for it.
   * The returned name must match a scenario key known to
   * ScenarioToContextTypes() (and hence a scenario name in the loaded
   * configuration files); a mismatching name silently yields no
   * configurations for that context.
   * @param context_type  LE Audio context type to translate.
   * @return Scenario name; kDefaultScenario for unknown context types.
   */
  static std::string ContextTypeToScenario(
      ::le_audio::types::LeAudioContextType context_type) {
    switch (context_type) {
      case types::LeAudioContextType::ALERTS:
        FALLTHROUGH_INTENDED;
      case types::LeAudioContextType::INSTRUCTIONAL:
        FALLTHROUGH_INTENDED;
      case types::LeAudioContextType::NOTIFICATIONS:
        FALLTHROUGH_INTENDED;
      case types::LeAudioContextType::EMERGENCYALARM:
        FALLTHROUGH_INTENDED;
      case types::LeAudioContextType::UNSPECIFIED:
        FALLTHROUGH_INTENDED;
      case types::LeAudioContextType::MEDIA:
        return "Media";
      case types::LeAudioContextType::RINGTONE:
        FALLTHROUGH_INTENDED;
      case types::LeAudioContextType::CONVERSATIONAL:
        return "Conversational";
      case types::LeAudioContextType::LIVE:
        return "Recording";
      case types::LeAudioContextType::GAME:
        return "Game";
      case types::LeAudioContextType::VOICEASSISTANTS:
        /* Bug fix: was the misspelled "VoiceAssinstants", which matches no
         * scenario key and silently broke the voice-assistant mapping. */
        return "VoiceAssistants";
      default:
        return kDefaultScenario;
    }
  }

  const AudioSetConfigurations* GetConfigurationsByContextType(
      LeAudioContextType context_type) const {
    if (context_configurations_.count(context_type))
@@ -77,17 +134,16 @@ struct AudioSetConfigurationProviderJson {
    LOG_WARN(": No predefined scenario for the context %d was found.",
             (int)context_type);

    auto fallback_scenario = "Default";
    context_type = ScenarioToContextType(fallback_scenario);

    if (context_configurations_.count(context_type)) {
      LOG_WARN(": Using %s scenario by default.", fallback_scenario);
      return &context_configurations_.at(context_type);
    auto [it_begin, it_end] = ScenarioToContextTypes(kDefaultScenario);
    if (it_begin != it_end) {
      LOG_WARN(": Using '%s' scenario by default.", kDefaultScenario);
      return &context_configurations_.at(it_begin->second);
    }

    LOG_ERROR(
        ": No fallback configuration for the 'Default' scenario or"
        " no valid audio set configurations loaded at all.");
        ": No valid configuration for the default '%s' scenario, or no audio "
        "set configurations loaded at all.",
        kDefaultScenario);
    return nullptr;
  };

@@ -446,9 +502,12 @@ struct AudioSetConfigurationProviderJson {

    LOG_DEBUG(": Updating %d scenarios.", flat_scenarios->size());
    for (auto const& scenario : *flat_scenarios) {
      auto [it_begin, it_end] =
          ScenarioToContextTypes(scenario->name()->c_str());
      for (auto it = it_begin; it != it_end; ++it) {
        context_configurations_.insert_or_assign(
          ScenarioToContextType(scenario->name()->c_str()),
          AudioSetConfigurationsFromFlatScenario(scenario));
            it->second, AudioSetConfigurationsFromFlatScenario(scenario));
      }
    }

    return true;
@@ -468,38 +527,6 @@ struct AudioSetConfigurationProviderJson {
    }
    return true;
  }

  /* Maps a context type to the scenario name expected in the configuration
   * files. The returned name must match a key of ScenarioToContextType()'s
   * table, otherwise the scenario cannot be resolved back to a context type.
   * @return Scenario name; "Default" for context types without a dedicated
   *         scenario.
   */
  std::string ContextTypeToScenario(
      ::le_audio::types::LeAudioContextType context_type) {
    switch (context_type) {
      case types::LeAudioContextType::MEDIA:
        return "Media";
      case types::LeAudioContextType::CONVERSATIONAL:
        return "Conversational";
      case types::LeAudioContextType::VOICEASSISTANTS:
        /* Bug fix: was the misspelled "VoiceAssinstants", which does not
         * match the "VoiceAssistants" scenario key used elsewhere. */
        return "VoiceAssistants";
      case types::LeAudioContextType::RINGTONE:
        return "Ringtone";
      default:
        return "Default";
    }
  }

  /* Translates a scenario name from the configuration files into the LE
   * Audio context type it serves.
   * @param scenario  Scenario name as it appears in the loaded files.
   * @return Matching context type, or RFU for unrecognized names.
   */
  static ::le_audio::types::LeAudioContextType ScenarioToContextType(
      std::string scenario) {
    static const std::map<std::string, ::le_audio::types::LeAudioContextType>
        scenarios = {
            {"Media", types::LeAudioContextType::MEDIA},
            {"Conversational", types::LeAudioContextType::CONVERSATIONAL},
            {"Ringtone", types::LeAudioContextType::RINGTONE},
            {"Recording", types::LeAudioContextType::LIVE},
            {"Game", types::LeAudioContextType::GAME},
            {"VoiceAssistants", types::LeAudioContextType::VOICEASSISTANTS},
            {"Default", types::LeAudioContextType::UNSPECIFIED},
        };
    /* Single find() instead of the redundant count()+at() double lookup. */
    auto it = scenarios.find(scenario);
    return it != scenarios.end() ? it->second : types::LeAudioContextType::RFU;
  }
};

struct AudioSetConfigurationProvider::impl {
+6 −0
Original line number Diff line number Diff line
@@ -26,6 +26,12 @@ using le_audio::types::LeAudioContextType;

namespace le_audio {
namespace utils {

/* The returned LeAudioContextType should have an entry in the
 * AudioSetConfigurationProvider's ContextTypeToScenario mapping table;
 * otherwise the AudioSetConfigurationProvider will fall back to the
 * default scenario.
 */
LeAudioContextType AudioContentToLeAudioContext(
    audio_content_type_t content_type, audio_usage_t usage) {
  /* Check audio attribute usage of stream */