Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 38ea517f authored by Nick Deakin's avatar Nick Deakin Committed by Android (Google) Code Review
Browse files

Merge "Add RecoveryMapMath tests; also some fixes."

parents 5956e69e 65f492ab
Loading
Loading
Loading
Loading
+40 −15
Original line number Diff line number Diff line
@@ -27,6 +27,8 @@ namespace android::recoverymap {
// Framework

const float kSdrWhiteNits = 100.0f;
const float kHlgMaxNits = 1000.0f;
const float kPqMaxNits = 10000.0f;

struct Color {
  union {
@@ -113,9 +115,14 @@ inline Color operator/(const Color& lhs, const float rhs) {

////////////////////////////////////////////////////////////////////////////////
// sRGB transformations
// NOTE: sRGB has the same color primaries as BT.709, but different transfer
// function. For this reason, all sRGB transformations here apply to BT.709,
// except for those concerning transfer functions.

/*
 * Calculate the luminance of a linear RGB sRGB pixel, according to IEC 61966-2-1.
 *
 * [0.0, 1.0] range in and out.
 */
float srgbLuminance(Color e);

@@ -142,7 +149,9 @@ Color srgbInvOetf(Color e_gamma);
// Display-P3 transformations

/*
 * Calculated the luminance of a linear RGB P3 pixel, according to EG 432-1.
 * Calculate the luminance of a linear RGB P3 pixel, according to SMPTE EG 432-1.
 *
 * [0.0, 1.0] range in and out.
 */
float p3Luminance(Color e);

@@ -152,6 +161,8 @@ float p3Luminance(Color e);

/*
 * Calculate the luminance of a linear RGB BT.2100 pixel.
 *
 * [0.0, 1.0] range in and out.
 */
float bt2100Luminance(Color e);

@@ -166,23 +177,35 @@ Color bt2100RgbToYuv(Color e_gamma);
Color bt2100YuvToRgb(Color e_gamma);

/*
 * Convert from scene luminance in nits to HLG.
 * Convert from scene luminance to HLG.
 *
 * [0.0, 1.0] range in and out.
 */
float hlgOetf(float e);
Color hlgOetf(Color e);

/*
 * Convert from HLG to scene luminance in nits.
 * Convert from HLG to scene luminance.
 *
 * [0.0, 1.0] range in and out.
 */
float hlgInvOetf(float e_gamma);
Color hlgInvOetf(Color e_gamma);

/*
 * Convert from scene luminance in nits to PQ.
 * Convert from scene luminance to PQ.
 *
 * [0.0, 1.0] range in and out.
 */
float pqOetf(float e);
Color pqOetf(Color e);

/*
 * Convert from PQ to scene luminance in nits.
 *
 * [0.0, 1.0] range in and out.
 */
float pqInvOetf(float e_gamma);
Color pqInvOetf(Color e_gamma);


@@ -230,34 +253,36 @@ uint8_t encodeRecovery(float y_sdr, float y_hdr, float hdr_ratio);
Color applyRecovery(Color e, float recovery, float hdr_ratio);

/*
 * Helper for sampling from images.
 * Helper for sampling from YUV 420 images.
 */
Color getYuv420Pixel(jr_uncompressed_ptr image, size_t x, size_t y);

/*
 * Helper for sampling from images.
 * Helper for sampling from P010 images.
 *
 * Expect narrow-range image data for P010.
 */
Color getP010Pixel(jr_uncompressed_ptr image, size_t x, size_t y);

/*
 * Sample the recovery value for the map from a given x,y coordinate on a scale
 * that is map scale factor larger than the map size.
 * Sample the image at the provided location, with a weighting based on nearby
 * pixels and the map scale factor.
 */
float sampleMap(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y);
Color sampleYuv420(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y);

/*
 * Sample the image Y value at the provided location, with a weighting based on nearby pixels
 * and the map scale factor.
 * Sample the image at the provided location, with a weighting based on nearby
 * pixels and the map scale factor.
 *
 * Expect narrow-range image data for P010.
 */
Color sampleYuv420(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y);
Color sampleP010(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y);

/*
 * Sample the image Y value at the provided location, with a weighting based on nearby pixels
 * and the map scale factor. Assumes narrow-range image data for P010.
 * Sample the recovery value for the map from a given x,y coordinate on a scale
 * that is map scale factor larger than the map size.
 */
Color sampleP010(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y);
float sampleMap(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y);

/*
 * Convert from Color to RGBA1010102.
+6 −3
Original line number Diff line number Diff line
@@ -390,12 +390,15 @@ status_t RecoveryMap::generateRecoveryMap(jr_uncompressed_ptr uncompressed_yuv_4
  map_data.reset(reinterpret_cast<uint8_t*>(dest->data));

  ColorTransformFn hdrInvOetf = nullptr;
  float hdr_white_nits = 0.0f;
  switch (metadata->transferFunction) {
    case JPEGR_TF_HLG:
      hdrInvOetf = hlgInvOetf;
      hdr_white_nits = kHlgMaxNits;
      break;
    case JPEGR_TF_PQ:
      hdrInvOetf = pqInvOetf;
      hdr_white_nits = kPqMaxNits;
      break;
  }

@@ -426,7 +429,7 @@ status_t RecoveryMap::generateRecoveryMap(jr_uncompressed_ptr uncompressed_yuv_4
      Color hdr_rgb_gamma = bt2100YuvToRgb(hdr_yuv_gamma);
      Color hdr_rgb = hdrInvOetf(hdr_rgb_gamma);
      hdr_rgb = hdrGamutConversionFn(hdr_rgb);
      float hdr_y_nits = luminanceFn(hdr_rgb);
      float hdr_y_nits = luminanceFn(hdr_rgb) * hdr_white_nits;

      hdr_y_nits_avg += hdr_y_nits;
      if (hdr_y_nits > hdr_y_nits_max) {
@@ -448,13 +451,13 @@ status_t RecoveryMap::generateRecoveryMap(jr_uncompressed_ptr uncompressed_yuv_4
                                         kMapDimensionScaleFactor, x, y);
      Color sdr_rgb_gamma = srgbYuvToRgb(sdr_yuv_gamma);
      Color sdr_rgb = srgbInvOetf(sdr_rgb_gamma);
      float sdr_y_nits = luminanceFn(sdr_rgb);
      float sdr_y_nits = luminanceFn(sdr_rgb) * kSdrWhiteNits;

      Color hdr_yuv_gamma = sampleP010(uncompressed_p010_image, kMapDimensionScaleFactor, x, y);
      Color hdr_rgb_gamma = bt2100YuvToRgb(hdr_yuv_gamma);
      Color hdr_rgb = hdrInvOetf(hdr_rgb_gamma);
      hdr_rgb = hdrGamutConversionFn(hdr_rgb);
      float hdr_y_nits = luminanceFn(hdr_rgb);
      float hdr_y_nits = luminanceFn(hdr_rgb) * hdr_white_nits;

      size_t pixel_idx =  x + y * map_width;
      reinterpret_cast<uint8_t*>(dest->data)[pixel_idx] =
+101 −56
Original line number Diff line number Diff line
@@ -23,12 +23,14 @@ namespace android::recoverymap {
////////////////////////////////////////////////////////////////////////////////
// sRGB transformations

// BT.709 relative-luminance coefficients; see IEC 61966-2-1, Equation F.7.
// Note: these are NOT the BT.601 luma weights (0.299/0.587/0.114) — those
// apply to the gamma-domain Y'CbCr conversion below, not to linear luminance.
static const float kSrgbR = 0.2126f, kSrgbG = 0.7152f, kSrgbB = 0.0722f;

/*
 * Relative luminance of a linear RGB sRGB (BT.709-primary) pixel.
 *
 * [0.0, 1.0] range in and out.
 */
float srgbLuminance(Color e) {
  return kSrgbR * e.r + kSrgbG * e.g + kSrgbB * e.b;
}

// See ECMA TR/98, Section 7.
static const float kSrgbRCr = 1.402f, kSrgbGCb = 0.34414f, kSrgbGCr = 0.71414f, kSrgbBCb = 1.772f;

Color srgbYuvToRgb(Color e_gamma) {
@@ -37,15 +39,18 @@ Color srgbYuvToRgb(Color e_gamma) {
             e_gamma.y + kSrgbBCb * e_gamma.u }}};
}

// Full-range R'G'B' -> Y'CbCr coefficients; see ECMA TR/98, Section 7.
static const float kSrgbYR = 0.299f, kSrgbYG = 0.587f, kSrgbYB = 0.114f;
static const float kSrgbUR = -0.1687f, kSrgbUG = -0.3313f, kSrgbUB = 0.5f;
static const float kSrgbVR = 0.5f, kSrgbVG = -0.4187f, kSrgbVB = -0.0813f;

/*
 * Convert a gamma-encoded sRGB pixel to full-range YUV, per ECMA TR/98.
 *
 * [0.0, 1.0] range in and out (chroma centered on 0.0).
 */
Color srgbRgbToYuv(Color e_gamma) {
  return {{{ kSrgbYR * e_gamma.r + kSrgbYG * e_gamma.g + kSrgbYB * e_gamma.b,
             kSrgbUR * e_gamma.r + kSrgbUG * e_gamma.g + kSrgbUB * e_gamma.b,
             kSrgbVR * e_gamma.r + kSrgbVG * e_gamma.g + kSrgbVB * e_gamma.b }}};
}

// See IEC 61966-2-1, Equations F.5 and F.6.
float srgbInvOetf(float e_gamma) {
  if (e_gamma <= 0.04045f) {
    return e_gamma / 12.92f;
@@ -64,7 +69,8 @@ Color srgbInvOetf(Color e_gamma) {
////////////////////////////////////////////////////////////////////////////////
// Display-P3 transformations

static const float kP3R = 0.22897f, kP3G = 0.69174f, kP3B = 0.07929f;
// See SMPTE EG 432-1, Table 7-2.
static const float kP3R = 0.20949f, kP3G = 0.72160f, kP3B = 0.06891f;

float p3Luminance(Color e) {
  return kP3R * e.r + kP3G * e.g + kP3B * e.b;
@@ -74,12 +80,14 @@ float p3Luminance(Color e) {
////////////////////////////////////////////////////////////////////////////////
// BT.2100 transformations - according to ITU-R BT.2100-2

// Luminance coefficients for BT.2100 primaries; see ITU-R BT.2100-2, Table 5,
// HLG Reference OOTF.
static const float kBt2100R = 0.2627f, kBt2100G = 0.6780f, kBt2100B = 0.0593f;

/*
 * Relative luminance of a linear BT.2100 RGB pixel.
 *
 * [0.0, 1.0] range in and out.
 */
float bt2100Luminance(Color e) {
  return e.r * kBt2100R
       + e.g * kBt2100G
       + e.b * kBt2100B;
}

// See ITU-R BT.2100-2, Table 6, Derivation of colour difference signals.
static const float kBt2100Cb = 1.8814f, kBt2100Cr = 1.4746f;

Color bt2100RgbToYuv(Color e_gamma) {
@@ -89,9 +97,9 @@ Color bt2100RgbToYuv(Color e_gamma) {
             (e_gamma.r - y_gamma) / kBt2100Cr }}};
}

// Derived from the reverse of bt2100RgbToYuv. The derivation for R and B are
// pretty straight forward; we just reverse the formulas for U and V above. But
// deriving the formula for G is a bit more complicated:
// Derived by inverting bt2100RgbToYuv. The derivations for R and B are pretty
// straightforward; we just invert the formulas for U and V above. But deriving
// the formula for G is a bit more complicated:
//
// Start with equation for luminance:
//   Y = kBt2100R * R + kBt2100G * G + kBt2100B * B
@@ -119,9 +127,10 @@ Color bt2100YuvToRgb(Color e_gamma) {
             e_gamma.y + kBt2100Cb * e_gamma.u }}};
}

// See ITU-R BT.2100-2, Table 5, HLG Reference OETF.
// Fix: kHlgC previously lacked the 'f' suffix, making it a double constant
// silently narrowed to float — inconsistent with its siblings.
static const float kHlgA = 0.17883277f, kHlgB = 0.28466892f, kHlgC = 0.55991073f;

static float hlgOetf(float e) {
float hlgOetf(float e) {
  if (e <= 1.0f/12.0f) {
    return sqrt(3.0f * e);
  } else {
@@ -133,7 +142,8 @@ Color hlgOetf(Color e) {
  return {{{ hlgOetf(e.r), hlgOetf(e.g), hlgOetf(e.b) }}};
}

static float hlgInvOetf(float e_gamma) {
// See ITU-R BT.2100-2, Table 5, HLG Reference EOTF.
float hlgInvOetf(float e_gamma) {
  if (e_gamma <= 0.5f) {
    return pow(e_gamma, 2.0f) / 3.0f;
  } else {
@@ -147,13 +157,14 @@ Color hlgInvOetf(Color e_gamma) {
             hlgInvOetf(e_gamma.b) }}};
}

// See ITU-R BT.2100-2, Table 4, Reference PQ OETF.
static const float kPqM1 = 2610.0f / 16384.0f, kPqM2 = 2523.0f / 4096.0f * 128.0f;
static const float kPqC1 = 3424.0f / 4096.0f, kPqC2 = 2413.0f / 4096.0f * 32.0f,
                   kPqC3 = 2392.0f / 4096.0f * 32.0f;

/*
 * Convert from normalized scene luminance to PQ.
 *
 * [0.0, 1.0] range in and out.
 */
float pqOetf(float e) {
  if (e <= 0.0f) return 0.0f;
  // Hoist pow(e, kPqM1): it appeared twice in the expression and pow() is
  // an expensive transcendental call.
  const float e_m1 = pow(e, kPqM1);
  return pow((kPqC1 + kPqC2 * e_m1) / (1.0f + kPqC3 * e_m1), kPqM2);
}

@@ -161,10 +172,18 @@ Color pqOetf(Color e) {
  return {{{ pqOetf(e.r), pqOetf(e.g), pqOetf(e.b) }}};
}

// Closed-form inverse of the Reference PQ OETF above. kPqInvF == 1 / kPqM2,
// kPqInvE == 1 / kPqM1, and kPqInvA..D are kPqC1..C3 rescaled by 4096/32.
static const float kPqInvA = 128.0f, kPqInvB = 107.0f, kPqInvC = 2413.0f, kPqInvD = 2392.0f,
                   kPqInvE = 6.2773946361f, kPqInvF = 0.0126833f;

/*
 * Convert from PQ to normalized scene luminance.
 *
 * [0.0, 1.0] range in and out.
 */
float pqInvOetf(float e_gamma) {
  // This equation blows up if e_gamma is 0.0, and checking on <= 0.0 doesn't
  // always catch 0.0. So, check on 0.0001, since anything this small will
  // effectively be crushed to zero anyways.
  if (e_gamma <= 0.0001f) return 0.0f;
  // Hoist pow(e_gamma, kPqInvF): used twice, and pow() is expensive.
  const float e_f = pow(e_gamma, kPqInvF);
  return pow((kPqInvA * e_f - kPqInvB) / (kPqInvC - kPqInvD * e_f), kPqInvE);
}

Color pqInvOetf(Color e_gamma) {
@@ -269,13 +288,14 @@ uint8_t encodeRecovery(float y_sdr, float y_hdr, float hdr_ratio) {
    gain = y_hdr / y_sdr;
  }

  if (gain < -hdr_ratio) gain = -hdr_ratio;
  if (gain < (1.0f / hdr_ratio)) gain = 1.0f / hdr_ratio;
  if (gain > hdr_ratio) gain = hdr_ratio;

  return static_cast<uint8_t>(log2(gain) / log2(hdr_ratio) * 127.5f  + 127.5f);
}

// Apply the recovery boost to one linear color component:
// out = e * hdr_ratio^recovery, computed in log2 space.
static float applyRecovery(float e, float recovery, float hdr_ratio) {
  if (e <= 0.0f) {
    // log2 of a non-positive value is undefined; crush to black.
    return 0.0f;
  }
  const float log_boost = recovery * log2(hdr_ratio);
  return exp2(log2(e) + log_boost);
}

@@ -285,45 +305,6 @@ Color applyRecovery(Color e, float recovery, float hdr_ratio) {
             applyRecovery(e.b, recovery, hdr_ratio) }}};
}

// TODO: do we need something more clever for filtering either the map or images
// to generate the map?

// Clamp val to the inclusive range [low, high].
static size_t clamp(const size_t& val, const size_t& low, const size_t& high) {
  return val < low ? low : (high < val ? high : val);
}

// Decode an 8-bit recovery-map sample to [-1.0, 1.0] (0 -> -1, 255 -> 1).
static float mapUintToFloat(uint8_t map_uint) {
  return (static_cast<float>(map_uint) - 127.5f) / 127.5f;
}

// Sample the recovery map at image coordinate (x, y), where the image is
// map_scale_factor times larger than the map in each dimension, blending the
// four surrounding map entries.
// NOTE(review): the corner weights look inverted — e1 (the lower/lower
// corner) gets weight (x_influence + y_influence) / 2, which *grows* as the
// sample point moves away from that corner, and the four weights sum to 2.0
// rather than 1.0. Verify against a reference bilinear implementation.
float sampleMap(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y) {
  // Sample position in map coordinates.
  float x_map = static_cast<float>(x) / static_cast<float>(map_scale_factor);
  float y_map = static_cast<float>(y) / static_cast<float>(map_scale_factor);

  // The four integer map entries surrounding the sample position.
  size_t x_lower = static_cast<size_t>(floor(x_map));
  size_t x_upper = x_lower + 1;
  size_t y_lower = static_cast<size_t>(floor(y_map));
  size_t y_upper = y_lower + 1;

  // Keep all four corners inside the map bounds.
  x_lower = clamp(x_lower, 0, map->width - 1);
  x_upper = clamp(x_upper, 0, map->width - 1);
  y_lower = clamp(y_lower, 0, map->height - 1);
  y_upper = clamp(y_upper, 0, map->height - 1);

  // Fractional offset of the sample from the lower/lower corner.
  float x_influence = x_map - static_cast<float>(x_lower);
  float y_influence = y_map - static_cast<float>(y_lower);

  float e1 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_lower + y_lower * map->width]);
  float e2 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_lower + y_upper * map->width]);
  float e3 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_upper + y_lower * map->width]);
  float e4 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_upper + y_upper * map->width]);

  return e1 * (x_influence + y_influence) / 2.0f
      + e2 * (x_influence + 1.0f - y_influence) / 2.0f
      + e3 * (1.0f - x_influence + y_influence) / 2.0f
      + e4 * (1.0f - x_influence + 1.0f - y_influence) / 2.0f;
}

Color getYuv420Pixel(jr_uncompressed_ptr image, size_t x, size_t y) {
  size_t pixel_count = image->width * image->height;

@@ -382,6 +363,70 @@ Color sampleP010(jr_uncompressed_ptr image, size_t map_scale_factor, size_t x, s
  return samplePixels(image, map_scale_factor, x, y, getP010Pixel);
}

// TODO: do we need something more clever for filtering either the map or images
// to generate the map?

// Clamp val to the inclusive range [low, high].
static size_t clamp(const size_t& val, const size_t& low, const size_t& high) {
  if (val < low) return low;
  if (high < val) return high;
  return val;
}

// Decode an 8-bit recovery-map sample to [-1.0, 1.0]: 0 -> -1.0, 255 -> 1.0.
static float mapUintToFloat(uint8_t map_uint) {
  const float centered = static_cast<float>(map_uint) - 127.5f;
  return centered / 127.5f;
}

// Euclidean length of a 2-D offset. Squares via multiplication rather than
// pow(x, 2.0f), which is a general transcendental call and far slower.
static float pythDistance(float x_diff, float y_diff) {
  return sqrt(x_diff * x_diff + y_diff * y_diff);
}

/*
 * Sample the recovery value for the map from a given x,y coordinate on a scale
 * that is map scale factor larger than the map size, using Shepard's method
 * (inverse distance weighting) over the four nearest map entries. For more
 * information: en.wikipedia.org/wiki/Inverse_distance_weighting#Shepard's_method
 */
float sampleMap(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y) {
  // Sample position in map coordinates.
  float x_map = static_cast<float>(x) / static_cast<float>(map_scale_factor);
  float y_map = static_cast<float>(y) / static_cast<float>(map_scale_factor);

  // The four integer map entries surrounding the sample position.
  size_t x_lower = static_cast<size_t>(floor(x_map));
  size_t x_upper = x_lower + 1;
  size_t y_lower = static_cast<size_t>(floor(y_map));
  size_t y_upper = y_lower + 1;

  // Keep all four corners inside the map bounds.
  x_lower = clamp(x_lower, 0, map->width - 1);
  x_upper = clamp(x_upper, 0, map->width - 1);
  y_lower = clamp(y_lower, 0, map->height - 1);
  y_upper = clamp(y_upper, 0, map->height - 1);

  uint8_t* map_entries = reinterpret_cast<uint8_t*>(map->data);

  // If the sample position coincides exactly with a map entry, its weight is
  // infinite; return that entry directly.

  float e1 = mapUintToFloat(map_entries[x_lower + y_lower * map->width]);
  float e1_dist = pythDistance(x_map - static_cast<float>(x_lower),
                               y_map - static_cast<float>(y_lower));
  if (e1_dist == 0.0f) return e1;

  float e2 = mapUintToFloat(map_entries[x_lower + y_upper * map->width]);
  float e2_dist = pythDistance(x_map - static_cast<float>(x_lower),
                               y_map - static_cast<float>(y_upper));
  if (e2_dist == 0.0f) return e2;

  float e3 = mapUintToFloat(map_entries[x_upper + y_lower * map->width]);
  float e3_dist = pythDistance(x_map - static_cast<float>(x_upper),
                               y_map - static_cast<float>(y_lower));
  if (e3_dist == 0.0f) return e3;

  float e4 = mapUintToFloat(map_entries[x_upper + y_upper * map->width]);
  float e4_dist = pythDistance(x_map - static_cast<float>(x_upper),
                               y_map - static_cast<float>(y_upper));
  // Bug fix: this previously returned e2 (copy-paste error); the zero-distance
  // early-out must return the entry whose distance is zero.
  if (e4_dist == 0.0f) return e4;

  float e1_weight = 1.0f / e1_dist;
  float e2_weight = 1.0f / e2_dist;
  float e3_weight = 1.0f / e3_dist;
  float e4_weight = 1.0f / e4_dist;
  float total_weight = e1_weight + e2_weight + e3_weight + e4_weight;

  return e1 * (e1_weight / total_weight)
       + e2 * (e2_weight / total_weight)
       + e3 * (e3_weight / total_weight)
       + e4 * (e4_weight / total_weight);
}

uint32_t colorToRgba1010102(Color e_gamma) {
  return (0x3ff & static_cast<uint32_t>(e_gamma.r * 1023.0f))
       | ((0x3ff & static_cast<uint32_t>(e_gamma.g * 1023.0f)) << 10)
+3 −1
Original line number Diff line number Diff line
@@ -26,13 +26,15 @@ cc_test {
    test_suites: ["device-tests"],
    srcs: [
        "recoverymap_test.cpp",
        "recoverymapmath_test.cpp",
    ],
    shared_libs: [
        "libimage_io",
        "libjpeg",
        "liblog",
    ],
    static_libs: [
        "libimage_io",
        "libgmock",
        "libgtest",
        "libjpegdecoder",
        "libjpegencoder",
+882 −0

File added.

Preview size limit exceeded, changes collapsed.