Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 511555dd authored by Eric Laurent's avatar Eric Laurent Committed by Gerrit Code Review
Browse files

Merge "Add multi-mic support for the noise suppression pre-processing effect"

parents 47dabd38 766bf734
Loading
Loading
Loading
Loading
+55 −18
Original line number Original line Diff line number Diff line
@@ -89,6 +89,7 @@ struct preproc_effect_s {
    preproc_session_t *session;     // session the effect is on
    preproc_session_t *session;     // session the effect is on
    const preproc_ops_t *ops;       // effect ops table
    const preproc_ops_t *ops;       // effect ops table
    preproc_fx_handle_t engine;     // handle on webRTC engine
    preproc_fx_handle_t engine;     // handle on webRTC engine
    uint32_t type;                  // subtype of effect
#ifdef DUAL_MIC_TEST
#ifdef DUAL_MIC_TEST
    bool aux_channels_on;           // support auxiliary channels
    bool aux_channels_on;           // support auxiliary channels
    size_t cur_channel_config;      // current auxiliary channel configuration
    size_t cur_channel_config;      // current auxiliary channel configuration
@@ -559,6 +560,21 @@ int NsInit (preproc_effect_t *effect)
    ALOGV("NsInit");
    ALOGV("NsInit");
    webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
    webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
    ns->set_level(kNsDefaultLevel);
    ns->set_level(kNsDefaultLevel);
    webrtc::Config config;
    std::vector<webrtc::Point> geometry;
    // TODO(aluebs): Make the geometry settable.
    geometry.push_back(webrtc::Point(-0.03f, 0.f, 0.f));
    geometry.push_back(webrtc::Point(-0.01f, 0.f, 0.f));
    geometry.push_back(webrtc::Point(0.01f, 0.f, 0.f));
    geometry.push_back(webrtc::Point(0.03f, 0.f, 0.f));
    // The geometry needs to be set with Beamforming enabled.
    config.Set<webrtc::Beamforming>(
            new webrtc::Beamforming(true, geometry));
    effect->session->apm->SetExtraOptions(config);
    config.Set<webrtc::Beamforming>(
            new webrtc::Beamforming(false, geometry));
    effect->session->apm->SetExtraOptions(config);
    effect->type = NS_TYPE_SINGLE_CHANNEL;
    return 0;
    return 0;
}
}


@@ -584,11 +600,35 @@ int NsGetParameter(preproc_effect_t *effect __unused,
    return status;
    return status;
}
}


// Sets a parameter on the noise suppression effect.
//   pParam: pointer to a uint32_t parameter id (NS_PARAM_LEVEL or NS_PARAM_TYPE)
//   pValue: pointer to a uint32_t parameter value
// NS_PARAM_LEVEL selects the webRTC suppression level.
// NS_PARAM_TYPE switches between single- and multi-channel NS; multi-channel
// enables APM beamforming, but only if the effect is currently enabled
// (NsEnable re-applies beamforming for the multi-channel type otherwise).
// Returns 0 on success, -EINVAL for an unknown parameter id.
int NsSetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
{
    int status = 0;
    webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
    uint32_t param = *(uint32_t *)pParam;
    uint32_t value = *(uint32_t *)pValue;
    switch(param) {
        case NS_PARAM_LEVEL:
            ns->set_level((webrtc::NoiseSuppression::Level)value);
            ALOGV("NsSetParameter() level %d", value);
            break;
        case NS_PARAM_TYPE:
        {
            webrtc::Config config;
            std::vector<webrtc::Point> geometry;
            // Beamforming is only turned on for multi-channel NS while the
            // effect is enabled; the geometry was registered in NsInit.
            bool is_beamforming_enabled =
                    value == NS_TYPE_MULTI_CHANNEL && ns->is_enabled();
            config.Set<webrtc::Beamforming>(
                    new webrtc::Beamforming(is_beamforming_enabled, geometry));
            effect->session->apm->SetExtraOptions(config);
            effect->type = value;
            ALOGV("NsSetParameter() type %d", value);
            break;
        }
        default:
            ALOGW("NsSetParameter() unknown param %08x value %08x", param, value);
            status = -EINVAL;
    }

    return status;
}


@@ -597,6 +637,12 @@ void NsEnable(preproc_effect_t *effect)
    webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
    webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
    ALOGV("NsEnable ns %p", ns);
    ALOGV("NsEnable ns %p", ns);
    ns->Enable(true);
    ns->Enable(true);
    if (effect->type == NS_TYPE_MULTI_CHANNEL) {
        webrtc::Config config;
        std::vector<webrtc::Point> geometry;
        config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry));
        effect->session->apm->SetExtraOptions(config);
    }
}
}


// Disables the noise suppression effect on the webRTC engine and
// unconditionally turns APM beamforming off, regardless of the configured
// NS type, so no beamforming work is done while the effect is disabled.
void NsDisable(preproc_effect_t *effect)
{
    ALOGV("NsDisable");
    webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
    ns->Enable(false);
    webrtc::Config config;
    std::vector<webrtc::Point> geometry;
    config.Set<webrtc::Beamforming>(new webrtc::Beamforming(false, geometry));
    effect->session->apm->SetExtraOptions(config);
}


static const preproc_ops_t sNsOps = {
static const preproc_ops_t sNsOps = {
@@ -897,17 +947,6 @@ int Session_SetConfig(preproc_session_t *session, effect_config_t *config)
         config->inputCfg.samplingRate, config->inputCfg.channels);
         config->inputCfg.samplingRate, config->inputCfg.channels);
    int status;
    int status;


    // if at least one process is enabled, do not accept configuration changes
    if (session->enabledMsk) {
        if (session->samplingRate != config->inputCfg.samplingRate ||
                session->inChannelCount != inCnl ||
                session->outChannelCount != outCnl) {
            return -ENOSYS;
        } else {
            return 0;
        }
    }

    // AEC implementation is limited to 16kHz
    // AEC implementation is limited to 16kHz
    if (config->inputCfg.samplingRate >= 32000 && !(session->createdMsk & (1 << PREPROC_AEC))) {
    if (config->inputCfg.samplingRate >= 32000 && !(session->createdMsk & (1 << PREPROC_AEC))) {
        session->apmSamplingRate = 32000;
        session->apmSamplingRate = 32000;
@@ -1286,8 +1325,7 @@ int PreProcessingFx_Process(effect_handle_t self,
            }
            }
            session->framesIn = 0;
            session->framesIn = 0;
        }
        }
        session->procFrame->samples_per_channel_ =
        session->procFrame->samples_per_channel_ = session->apmFrameCount;
                session->apmFrameCount * session->inChannelCount;


        effect->session->apm->ProcessStream(session->procFrame);
        effect->session->apm->ProcessStream(session->procFrame);


@@ -1798,8 +1836,7 @@ int PreProcessingFx_ProcessReverse(effect_handle_t self,
            }
            }
            session->framesRev = 0;
            session->framesRev = 0;
        }
        }
        session->revFrame->samples_per_channel_ =
        session->revFrame->samples_per_channel_ = session->apmFrameCount;
                session->apmFrameCount * session->inChannelCount;
        effect->session->apm->AnalyzeReverseStream(session->revFrame);
        effect->session->apm->AnalyzeReverseStream(session->revFrame);
        return 0;
        return 0;
    } else {
    } else {