Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 66ef83f3 authored by Jan Sebechlebsky's avatar Jan Sebechlebsky
Browse files

Reconfigure the input surface when needed.

When the set of output streams changes so that
different input configuration is picked, reconfigure
client to use new Surface with preferred input configuration.

Bug: 343628528
Test: atest virtual_camera_tests
Test: atest CtsVirtualDevicesCameraTestCases
Test: atest CtsVirtualDevicesCameraCtsTestCases

Change-Id: I8ec56adf0a230981be779a9b51d8505f1c83647c
parent 71028c9a
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -618,6 +618,10 @@ Resolution VirtualCameraDevice::getMaxInputResolution() const {
  return maxResolution.value();
}

// Atomically reserves and returns the next unique id for an input stream
// (input surface). Safe to call concurrently from multiple sessions.
int VirtualCameraDevice::allocateInputStreamId() {
  return mNextInputStreamId.fetch_add(1);
}

std::shared_ptr<VirtualCameraDevice> VirtualCameraDevice::sharedFromThis() {
  // SharedRefBase which BnCameraDevice inherits from breaks
  // std::enable_shared_from_this. This is recommended replacement for
+5 −0
Original line number Diff line number Diff line
@@ -104,6 +104,9 @@ class VirtualCameraDevice
  // Returns largest supported input resolution.
  Resolution getMaxInputResolution() const;

  // Allocate and return next id for input stream (input surface).
  int allocateInputStreamId();

  // Maximal number of RAW streams - virtual camera doesn't support RAW streams.
  static constexpr int32_t kMaxNumberOfRawStreams = 0;

@@ -148,6 +151,8 @@ class VirtualCameraDevice
  const std::vector<
      aidl::android::companion::virtualcamera::SupportedStreamConfiguration>
      mSupportedInputConfigurations;

  std::atomic_int mNextInputStreamId;
};

}  // namespace virtualcamera
+73 −13
Original line number Diff line number Diff line
@@ -212,6 +212,27 @@ Resolution resolutionFromInputConfig(
  return Resolution(inputConfig.width, inputConfig.height);
}

// Queries the native-window width and height of `surface`.
//
// Returns std::nullopt (and logs an error) when the surface is null or when
// either query fails; otherwise returns the surface's Resolution.
//
// Takes the strong pointer by const reference to avoid an unnecessary
// refcount increment/decrement on every call.
std::optional<Resolution> resolutionFromSurface(const sp<Surface>& surface) {
  Resolution res{0, 0};
  if (surface == nullptr) {
    ALOGE("%s: Cannot get resolution from null surface", __func__);
    return std::nullopt;
  }

  int status = surface->query(NATIVE_WINDOW_WIDTH, &res.width);
  if (status != NO_ERROR) {
    ALOGE("%s: Failed to get width from surface", __func__);
    return std::nullopt;
  }

  status = surface->query(NATIVE_WINDOW_HEIGHT, &res.height);
  if (status != NO_ERROR) {
    ALOGE("%s: Failed to get height from surface", __func__);
    return std::nullopt;
  }
  return res;
}

std::optional<SupportedStreamConfiguration> pickInputConfigurationForStreams(
    const std::vector<Stream>& requestedStreams,
    const std::vector<SupportedStreamConfiguration>& supportedInputConfigs) {
@@ -292,13 +313,13 @@ VirtualCameraSession::VirtualCameraSession(

ndk::ScopedAStatus VirtualCameraSession::close() {
  ALOGV("%s", __func__);
  {
    std::lock_guard<std::mutex> lock(mLock);

    if (mVirtualCameraClientCallback != nullptr) {
    mVirtualCameraClientCallback->onStreamClosed(/*streamId=*/0);
      mVirtualCameraClientCallback->onStreamClosed(mCurrentInputStreamId);
    }

  {
    std::lock_guard<std::mutex> lock(mLock);
    if (mRenderThread != nullptr) {
      mRenderThread->stop();
      mRenderThread = nullptr;
@@ -339,6 +360,7 @@ ndk::ScopedAStatus VirtualCameraSession::configureStreams(
  }

  sp<Surface> inputSurface = nullptr;
  int inputStreamId = -1;
  std::optional<SupportedStreamConfiguration> inputConfig;
  {
    std::lock_guard<std::mutex> lock(mLock);
@@ -358,13 +380,49 @@ ndk::ScopedAStatus VirtualCameraSession::configureStreams(
          __func__, in_requestedConfiguration.toString().c_str());
      return cameraStatus(Status::ILLEGAL_ARGUMENT);
    }
    if (mRenderThread == nullptr) {

    if (mRenderThread != nullptr) {
      // If there's already a render thread, it means this is not a first
      // configuration call. If the surface has the same resolution and pixel
      // format as the picked config, we don't need to do anything, the current
      // render thread is capable of serving new set of configuration. However
      // if it differs, we need to discard the current surface and
      // reinitialize the render thread.

      std::optional<Resolution> currentInputResolution =
          resolutionFromSurface(mRenderThread->getInputSurface());
      if (currentInputResolution.has_value() &&
          *currentInputResolution == resolutionFromInputConfig(*inputConfig)) {
        ALOGI(
            "%s: Newly configured set of streams matches existing client "
            "surface (%dx%d)",
            __func__, currentInputResolution->width,
            currentInputResolution->height);
        return ndk::ScopedAStatus::ok();
      }

      if (mVirtualCameraClientCallback != nullptr) {
        mVirtualCameraClientCallback->onStreamClosed(mCurrentInputStreamId);
      }

      ALOGV(
          "%s: Newly requested output streams are not suitable for "
          "pre-existing surface (%dx%d), creating new surface (%dx%d)",
          __func__, currentInputResolution->width,
          currentInputResolution->height, inputConfig->width,
          inputConfig->height);

      mRenderThread->flush();
      mRenderThread->stop();
    }

    mRenderThread = std::make_unique<VirtualCameraRenderThread>(
        mSessionContext, resolutionFromInputConfig(*inputConfig),
        virtualCamera->getMaxInputResolution(), mCameraDeviceCallback);
    mRenderThread->start();
    inputSurface = mRenderThread->getInputSurface();
    }
    inputStreamId = mCurrentInputStreamId =
        virtualCamera->allocateInputStreamId();
  }

  if (mVirtualCameraClientCallback != nullptr && inputSurface != nullptr) {
@@ -372,7 +430,7 @@ ndk::ScopedAStatus VirtualCameraSession::configureStreams(
    // support for multiple input streams is implemented. For now we always
    // create single texture.
    mVirtualCameraClientCallback->onStreamConfigured(
        /*streamId=*/0, aidl::android::view::Surface(inputSurface.get()),
        inputStreamId, aidl::android::view::Surface(inputSurface.get()),
        inputConfig->width, inputConfig->height, inputConfig->pixelFormat);
  }

@@ -519,6 +577,7 @@ ndk::ScopedAStatus VirtualCameraSession::processCaptureRequest(

  std::shared_ptr<ICameraDeviceCallback> cameraCallback = nullptr;
  RequestSettings requestSettings;
  int currentInputStreamId;
  {
    std::lock_guard<std::mutex> lock(mLock);

@@ -537,6 +596,7 @@ ndk::ScopedAStatus VirtualCameraSession::processCaptureRequest(
    requestSettings = createSettingsFromMetadata(mCurrentRequestMetadata);

    cameraCallback = mCameraDeviceCallback;
    currentInputStreamId = mCurrentInputStreamId;
  }

  if (cameraCallback == nullptr) {
@@ -574,7 +634,7 @@ ndk::ScopedAStatus VirtualCameraSession::processCaptureRequest(

  if (mVirtualCameraClientCallback != nullptr) {
    auto status = mVirtualCameraClientCallback->onProcessCaptureRequest(
        /*streamId=*/0, request.frameNumber);
        currentInputStreamId, request.frameNumber);
    if (!status.isOk()) {
      ALOGE(
          "Failed to invoke onProcessCaptureRequest client callback for frame "
+2 −0
Original line number Diff line number Diff line
@@ -143,6 +143,8 @@ class VirtualCameraSession
      GUARDED_BY(mLock);

  std::unique_ptr<VirtualCameraRenderThread> mRenderThread GUARDED_BY(mLock);

  int mCurrentInputStreamId GUARDED_BY(mLock);
};

}  // namespace virtualcamera
+86 −0
Original line number Diff line number Diff line
@@ -379,6 +379,92 @@ TEST_F(VirtualCameraSessionInputChoiceTest,
          .isOk());
}

// Verifies that when a stream reconfiguration picks a different input
// configuration, the old input stream is closed and a new one (with a new
// stream id) is configured for the client.
TEST_F(VirtualCameraSessionInputChoiceTest, reconfigureSwitchesInputStream) {
  // Create camera configured to support SVGA YUV input and RGB QVGA input.
  auto virtualCameraSession = createSession(
      {SupportedStreamConfiguration{.width = kSvgaWidth,
                                    .height = kSvgaHeight,
                                    .pixelFormat = Format::YUV_420_888,
                                    .maxFps = kMaxFps},
       SupportedStreamConfiguration{.width = kQvgaWidth,
                                    .height = kQvgaHeight,
                                    .pixelFormat = Format::RGBA_8888,
                                    .maxFps = kMaxFps}});

  // First configure QVGA stream.
  StreamConfiguration streamConfiguration;
  streamConfiguration.streams = {createStream(
      kStreamId, kQvgaWidth, kQvgaHeight, PixelFormat::IMPLEMENTATION_DEFINED)};
  std::vector<HalStream> halStreams;

  // Expect QVGA input configuration to be chosen.
  EXPECT_CALL(*mMockVirtualCameraClientCallback,
              onStreamConfigured(kStreamId, _, kQvgaWidth, kQvgaHeight,
                                 Format::RGBA_8888));
  EXPECT_TRUE(
      virtualCameraSession->configureStreams(streamConfiguration, &halStreams)
          .isOk());

  // Reconfigure with additional VGA stream.
  streamConfiguration.streams.push_back(
      createStream(kStreamId + 1, kVgaWidth, kVgaHeight,
                   PixelFormat::IMPLEMENTATION_DEFINED));

  // Expect original surface to be discarded.
  EXPECT_CALL(*mMockVirtualCameraClientCallback, onStreamClosed(kStreamId));

  // Expect SVGA input configuration to be chosen (VGA output can only be
  // served by downscaling from the larger SVGA input).
  EXPECT_CALL(*mMockVirtualCameraClientCallback,
              onStreamConfigured(kStreamId + 1, _, kSvgaWidth, kSvgaHeight,
                                 Format::YUV_420_888));
  EXPECT_TRUE(
      virtualCameraSession->configureStreams(streamConfiguration, &halStreams)
          .isOk());
}

// Verifies that reconfiguring with output streams that map to the same input
// configuration keeps the existing input surface — no onStreamConfigured
// callback is fired again.
TEST_F(VirtualCameraSessionInputChoiceTest,
       reconfigureKeepsInputStreamIfUnchanged) {
  // Create camera configured to support SVGA YUV input and RGB QVGA input.
  auto virtualCameraSession = createSession(
      {SupportedStreamConfiguration{.width = kSvgaWidth,
                                    .height = kSvgaHeight,
                                    .pixelFormat = Format::YUV_420_888,
                                    .maxFps = kMaxFps},
       SupportedStreamConfiguration{.width = kQvgaWidth,
                                    .height = kQvgaHeight,
                                    .pixelFormat = Format::RGBA_8888,
                                    .maxFps = kMaxFps}});

  // First configure SVGA stream.
  StreamConfiguration streamConfiguration;
  streamConfiguration.streams = {createStream(
      kStreamId, kSvgaWidth, kSvgaHeight, PixelFormat::IMPLEMENTATION_DEFINED)};
  std::vector<HalStream> halStreams;

  // Expect SVGA input configuration to be chosen.
  EXPECT_CALL(*mMockVirtualCameraClientCallback,
              onStreamConfigured(kStreamId, _, kSvgaWidth, kSvgaHeight,
                                 Format::YUV_420_888));
  EXPECT_TRUE(
      virtualCameraSession->configureStreams(streamConfiguration, &halStreams)
          .isOk());

  // Reconfigure with two VGA streams. Because we only allow downscaling,
  // these will be matched to SVGA input resolution.
  // NOTE(review): earlier comment said "VGA + QVGA", but both streams created
  // below use VGA dimensions — confirm which was intended.
  streamConfiguration.streams = {
      createStream(kStreamId + 1, kVgaWidth, kVgaHeight,
                   PixelFormat::IMPLEMENTATION_DEFINED),
      createStream(kStreamId + 2, kVgaWidth, kVgaHeight,
                   PixelFormat::IMPLEMENTATION_DEFINED)};

  // Expect the onStreamConfigured callback not to be invoked, since the
  // original Surface is still best fit for current output streams.
  EXPECT_CALL(*mMockVirtualCameraClientCallback, onStreamConfigured).Times(0);
  EXPECT_TRUE(
      virtualCameraSession->configureStreams(streamConfiguration, &halStreams)
          .isOk());
}

}  // namespace
}  // namespace virtualcamera
}  // namespace companion