author     Nick Deakin <deakin@google.com>    2023-05-19 17:14:45 -0400
committer  Nick Deakin <deakin@google.com>    2023-06-08 10:14:12 -0400
commit     0db53ee3c9ae908d14c09290a4fb51036df25620 (patch)
tree       99fa81400c2b05f540b48c9280fe5117edd871c9
parent     fcb80a221e7ac3548ff5d751eea14c708a6f43fd (diff)
download   native-0db53ee3c9ae908d14c09290a4fb51036df25620.tar.gz
libultrahdr: correct srgb, p3 calculations and jpeg yuv handling
* Correct luminance calculation for sRGB to utilize actual luminance
  coefficients for the gamut, rather than 601 luma coefficients.
* Correct YUV<->RGB conversion for sRGB to utilize Rec.709 coefficients
  rather than Rec.601 coefficients as it was previously.
* New P3 YUV<->RGB conversion, which uses Rec.601 coefficients.
* Also ICC profile fixes to make things work; more below.
* Update things to correctly convert to and from Rec.601 YUV for jpeg
  encoding; more below.

This setup for YUV<->RGB coefficients is chosen to match the expectations of
DataSpace when it comes to interpreting YUV encoding of data. Generally, the
interpretation is cued off of the color primaries, since the specifications
around color primaries generally also specify a YUV interpretation.
Display-P3 is a bit of an outlier; the best specification of Display-P3 is
SMPTE EG 432-1, but EG 432-1 doesn't cover YUV interpretation. So, since
DataSpace interprets Display-P3 YUV data via the Rec.601 coefficients, we
should do the same here.

ICC profile fixes; the ICC profiles we wrote were broken before this for a
variety of reasons:
* The endianness macro wasn't actually swapping endianness to provide the
  correct encoding in our output.
* We weren't writing out the identifier for the app segment, including the
  chunk count and ID.
* We were assuming input JPEGs have ICC data, which may not be the case.
* We also need to read in the ICC profile during decode to apply the map
  properly, and we didn't have any mechanism previously to read the ICC
  profile and determine the gamut of the encoded JPEGR file.
* Upon adding ICC reading code to our JPEG decoding, also remove some dead
  code from previous EXIF reading.
* Add a number of tests to verify all of this stuff stays fixed.

YUV interpretation and Rec.601:
* Previously, we were feeding YUV right into the JPEG encoder; this is
  problematic because JPEG encoders usually (and definitely in our specific
  case) expect Rec.601 YUV encoded input data, since this is by definition
  the format of JPEG YUV data according to ECMA TR/98.
* Now properly convert from Rec.709 or Rec.2100 YUV encoding to Rec.601
  (when necessary) prior to passing YUV data to the jpeg encoder.
* Also make sure we properly interpret decoded YUV output as Rec.601 after
  decode.
* This involved adding some new methods to facilitate these conversions.
* Added some new tests to verify these conversions.
* Note that to do these YUV conversions for subsampled 420 data, we take each
  set of 4 Y and 1 UV, and calculate the result against each combination. The
  new Y values each get the corresponding result, and the new UV value is
  equal to the average of the set.
* Note that none of this is a concern for gain map encoding/decoding via JPEG
  because gain maps are single channel.

Bug: 283143961
Test: added new tests, all tests pass
Change-Id: Ibc7b1779fc3a8244f85abb581c554963f57dc5a4
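For reference (this sketch is not part of the change itself), the BT.709-to-BT.601 constants introduced in gainmapmath.cpp below (yuv709To601 and friends) can be reproduced exactly as the message describes: compose the source encoding's YUV->RGB matrix with the destination encoding's RGB->YUV matrix. A minimal standalone C++ sketch; all names in it are illustrative only:

#include <cstdio>

int main() {
  // BT.709 luma coefficients and colour-difference scale factors
  // (see the gainmapmath.cpp hunk below).
  const double kR709 = 0.2126, kG709 = 0.7152, kB709 = 0.0722;
  const double cb709 = 1.8556, cr709 = 1.5748;
  // BT.601 equivalents, used here as the JPEG / Display-P3 YUV encoding.
  const double kR601 = 0.299, kG601 = 0.587, kB601 = 0.114;
  const double cb601 = 1.772, cr601 = 1.402;

  // BT.709 YUV -> RGB (rows R, G, B; columns Y, U, V).
  const double yuvToRgb709[3][3] = {
      { 1.0, 0.0, cr709 },
      { 1.0, -kB709 * cb709 / kG709, -kR709 * cr709 / kG709 },
      { 1.0, cb709, 0.0 } };
  // BT.601 RGB -> YUV (rows Y, U, V; columns R, G, B).
  const double rgbToYuv601[3][3] = {
      { kR601, kG601, kB601 },
      { -kR601 / cb601, -kG601 / cb601, (1.0 - kB601) / cb601 },
      { (1.0 - kR601) / cr601, -kG601 / cr601, -kB601 / cr601 } };

  // rgbToYuv601 * yuvToRgb709 is the BT.709 -> BT.601 YUV conversion matrix.
  // The first row prints as roughly 1.000000 0.101579 0.196076, matching
  // yuv709To601 in the diff.
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 3; ++j) {
      double sum = 0.0;
      for (int k = 0; k < 3; ++k) sum += rgbToYuv601[i][k] * yuvToRgb709[k][j];
      std::printf("%9.6f ", sum);
    }
    std::printf("\n");
  }
  return 0;
}

The other five conversion functions in the diff follow from the analogous matrix products.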
-rw-r--r--  libs/ultrahdr/gainmapmath.cpp                        | 149
-rw-r--r--  libs/ultrahdr/icc.cpp                                |  96
-rw-r--r--  libs/ultrahdr/include/ultrahdr/gainmapmath.h         |  65
-rw-r--r--  libs/ultrahdr/include/ultrahdr/icc.h                 |  25
-rw-r--r--  libs/ultrahdr/include/ultrahdr/jpegdecoderhelper.h   |  15
-rw-r--r--  libs/ultrahdr/include/ultrahdr/jpegr.h               |  44
-rw-r--r--  libs/ultrahdr/jpegdecoderhelper.cpp                  |  52
-rw-r--r--  libs/ultrahdr/jpegr.cpp                              | 228
-rw-r--r--  libs/ultrahdr/tests/Android.bp                       |   5
-rw-r--r--  libs/ultrahdr/tests/data/minnie-320x240-yuv-icc.jpg  | bin 0 -> 37101 bytes
-rw-r--r--  libs/ultrahdr/tests/gainmapmath_test.cpp             | 229
-rw-r--r--  libs/ultrahdr/tests/icchelper_test.cpp               |  77
-rw-r--r--  libs/ultrahdr/tests/jpegdecoderhelper_test.cpp       |  58
13 files changed, 954 insertions, 89 deletions
diff --git a/libs/ultrahdr/gainmapmath.cpp b/libs/ultrahdr/gainmapmath.cpp
index 37c3cf3d3b..ee15363b69 100644
--- a/libs/ultrahdr/gainmapmath.cpp
+++ b/libs/ultrahdr/gainmapmath.cpp
@@ -119,34 +119,39 @@ static float clampPixelFloat(float value) {
return (value < 0.0f) ? 0.0f : (value > kMaxPixelFloat) ? kMaxPixelFloat : value;
}
-// See IEC 61966-2-1, Equation F.7.
+// See IEC 61966-2-1/Amd 1:2003, Equation F.7.
static const float kSrgbR = 0.2126f, kSrgbG = 0.7152f, kSrgbB = 0.0722f;
float srgbLuminance(Color e) {
return kSrgbR * e.r + kSrgbG * e.g + kSrgbB * e.b;
}
-// See ECMA TR/98, Section 7.
-static const float kSrgbRCr = 1.402f, kSrgbGCb = 0.34414f, kSrgbGCr = 0.71414f, kSrgbBCb = 1.772f;
+// See ITU-R BT.709-6, Section 3.
+// Uses the same coefficients for deriving luma signal as
+// IEC 61966-2-1/Amd 1:2003 states for luminance, so we reuse the luminance
+// function above.
+static const float kSrgbCb = 1.8556f, kSrgbCr = 1.5748f;
-Color srgbYuvToRgb(Color e_gamma) {
- return {{{ clampPixelFloat(e_gamma.y + kSrgbRCr * e_gamma.v),
- clampPixelFloat(e_gamma.y - kSrgbGCb * e_gamma.u - kSrgbGCr * e_gamma.v),
- clampPixelFloat(e_gamma.y + kSrgbBCb * e_gamma.u) }}};
+Color srgbRgbToYuv(Color e_gamma) {
+ float y_gamma = srgbLuminance(e_gamma);
+ return {{{ y_gamma,
+ (e_gamma.b - y_gamma) / kSrgbCb,
+ (e_gamma.r - y_gamma) / kSrgbCr }}};
}
-// See ECMA TR/98, Section 7.
-static const float kSrgbYR = 0.299f, kSrgbYG = 0.587f, kSrgbYB = 0.114f;
-static const float kSrgbUR = -0.1687f, kSrgbUG = -0.3313f, kSrgbUB = 0.5f;
-static const float kSrgbVR = 0.5f, kSrgbVG = -0.4187f, kSrgbVB = -0.0813f;
+// See ITU-R BT.709-6, Section 3.
+// Same derivation as BT.2100's YUV->RGB, below. Similar to srgbRgbToYuv, we
+// can reuse the luminance coefficients since they are the same.
+static const float kSrgbGCb = kSrgbB * kSrgbCb / kSrgbG;
+static const float kSrgbGCr = kSrgbR * kSrgbCr / kSrgbG;
-Color srgbRgbToYuv(Color e_gamma) {
- return {{{ kSrgbYR * e_gamma.r + kSrgbYG * e_gamma.g + kSrgbYB * e_gamma.b,
- kSrgbUR * e_gamma.r + kSrgbUG * e_gamma.g + kSrgbUB * e_gamma.b,
- kSrgbVR * e_gamma.r + kSrgbVG * e_gamma.g + kSrgbVB * e_gamma.b }}};
+Color srgbYuvToRgb(Color e_gamma) {
+ return {{{ clampPixelFloat(e_gamma.y + kSrgbCr * e_gamma.v),
+ clampPixelFloat(e_gamma.y - kSrgbGCb * e_gamma.u - kSrgbGCr * e_gamma.v),
+ clampPixelFloat(e_gamma.y + kSrgbCb * e_gamma.u) }}};
}
-// See IEC 61966-2-1, Equations F.5 and F.6.
+// See IEC 61966-2-1/Amd 1:2003, Equations F.5 and F.6.
float srgbInvOetf(float e_gamma) {
if (e_gamma <= 0.04045f) {
return e_gamma / 12.92f;
@@ -178,13 +183,38 @@ Color srgbInvOetfLUT(Color e_gamma) {
////////////////////////////////////////////////////////////////////////////////
// Display-P3 transformations
-// See SMPTE EG 432-1, Table 7-2.
+// See SMPTE EG 432-1, Equation 7-8.
static const float kP3R = 0.20949f, kP3G = 0.72160f, kP3B = 0.06891f;
float p3Luminance(Color e) {
return kP3R * e.r + kP3G * e.g + kP3B * e.b;
}
+// See ITU-R BT.601-7, Sections 2.5.1 and 2.5.2.
+// Unfortunately, calculation of luma signal differs from calculation of
+// luminance for Display-P3, so we can't reuse p3Luminance here.
+static const float kP3YR = 0.299f, kP3YG = 0.587f, kP3YB = 0.114f;
+static const float kP3Cb = 1.772f, kP3Cr = 1.402f;
+
+Color p3RgbToYuv(Color e_gamma) {
+ float y_gamma = kP3YR * e_gamma.r + kP3YG * e_gamma.g + kP3YB * e_gamma.b;
+ return {{{ y_gamma,
+ (e_gamma.b - y_gamma) / kP3Cb,
+ (e_gamma.r - y_gamma) / kP3Cr }}};
+}
+
+// See ITU-R BT.601-7, Sections 2.5.1 and 2.5.2.
+// Same derivation as BT.2100's YUV->RGB, below. Similar to p3RgbToYuv, we must
+// use luma signal coefficients rather than the luminance coefficients.
+static const float kP3GCb = kP3YB * kP3Cb / kP3YG;
+static const float kP3GCr = kP3YR * kP3Cr / kP3YG;
+
+Color p3YuvToRgb(Color e_gamma) {
+ return {{{ clampPixelFloat(e_gamma.y + kP3Cr * e_gamma.v),
+ clampPixelFloat(e_gamma.y - kP3GCb * e_gamma.u - kP3GCr * e_gamma.v),
+ clampPixelFloat(e_gamma.y + kP3Cb * e_gamma.u) }}};
+}
+
////////////////////////////////////////////////////////////////////////////////
// BT.2100 transformations - according to ITU-R BT.2100-2
@@ -197,6 +227,8 @@ float bt2100Luminance(Color e) {
}
// See ITU-R BT.2100-2, Table 6, Derivation of colour difference signals.
+// BT.2100 uses the same coefficients for calculating luma signal and luminance,
+// so we reuse the luminance function here.
static const float kBt2100Cb = 1.8814f, kBt2100Cr = 1.4746f;
Color bt2100RgbToYuv(Color e_gamma) {
@@ -206,6 +238,10 @@ Color bt2100RgbToYuv(Color e_gamma) {
(e_gamma.r - y_gamma) / kBt2100Cr }}};
}
+// See ITU-R BT.2100-2, Table 6, Derivation of colour difference signals.
+//
+// Similar to bt2100RgbToYuv above, we can reuse the luminance coefficients.
+//
// Derived by inversing bt2100RgbToYuv. The derivation for R and B are pretty
// straight forward; we just invert the formulas for U and V above. But deriving
// the formula for G is a bit more complicated:
@@ -440,6 +476,85 @@ ColorTransformFn getHdrConversionFn(ultrahdr_color_gamut sdr_gamut,
}
}
+// All of these conversions are derived from the respective input YUV->RGB conversion followed by
+// the RGB->YUV for the receiving encoding. They are consistent with the RGB<->YUV functions in this
+// file, given that we use BT.709 encoding for sRGB and BT.601 encoding for Display-P3, to match
+// DataSpace.
+
+Color yuv709To601(Color e_gamma) {
+ return {{{ 1.0f * e_gamma.y + 0.101579f * e_gamma.u + 0.196076f * e_gamma.v,
+ 0.0f * e_gamma.y + 0.989854f * e_gamma.u + -0.110653f * e_gamma.v,
+ 0.0f * e_gamma.y + -0.072453f * e_gamma.u + 0.983398f * e_gamma.v }}};
+}
+
+Color yuv709To2100(Color e_gamma) {
+ return {{{ 1.0f * e_gamma.y + -0.016969f * e_gamma.u + 0.096312f * e_gamma.v,
+ 0.0f * e_gamma.y + 0.995306f * e_gamma.u + -0.051192f * e_gamma.v,
+ 0.0f * e_gamma.y + 0.011507f * e_gamma.u + 1.002637f * e_gamma.v }}};
+}
+
+Color yuv601To709(Color e_gamma) {
+ return {{{ 1.0f * e_gamma.y + -0.118188f * e_gamma.u + -0.212685f * e_gamma.v,
+ 0.0f * e_gamma.y + 1.018640f * e_gamma.u + 0.114618f * e_gamma.v,
+ 0.0f * e_gamma.y + 0.075049f * e_gamma.u + 1.025327f * e_gamma.v }}};
+}
+
+Color yuv601To2100(Color e_gamma) {
+ return {{{ 1.0f * e_gamma.y + -0.128245f * e_gamma.u + -0.115879f * e_gamma.v,
+ 0.0f * e_gamma.y + 1.010016f * e_gamma.u + 0.061592f * e_gamma.v,
+ 0.0f * e_gamma.y + 0.086969f * e_gamma.u + 1.029350f * e_gamma.v }}};
+}
+
+Color yuv2100To709(Color e_gamma) {
+ return {{{ 1.0f * e_gamma.y + 0.018149f * e_gamma.u + -0.095132f * e_gamma.v,
+ 0.0f * e_gamma.y + 1.004123f * e_gamma.u + 0.051267f * e_gamma.v,
+ 0.0f * e_gamma.y + -0.011524f * e_gamma.u + 0.996782f * e_gamma.v }}};
+}
+
+Color yuv2100To601(Color e_gamma) {
+ return {{{ 1.0f * e_gamma.y + 0.117887f * e_gamma.u + 0.105521f * e_gamma.v,
+ 0.0f * e_gamma.y + 0.995211f * e_gamma.u + -0.059549f * e_gamma.v,
+ 0.0f * e_gamma.y + -0.084085f * e_gamma.u + 0.976518f * e_gamma.v }}};
+}
+
+void transformYuv420(jr_uncompressed_ptr image, size_t x_chroma, size_t y_chroma,
+ ColorTransformFn fn) {
+ Color yuv1 = getYuv420Pixel(image, x_chroma * 2, y_chroma * 2 );
+ Color yuv2 = getYuv420Pixel(image, x_chroma * 2 + 1, y_chroma * 2 );
+ Color yuv3 = getYuv420Pixel(image, x_chroma * 2, y_chroma * 2 + 1);
+ Color yuv4 = getYuv420Pixel(image, x_chroma * 2 + 1, y_chroma * 2 + 1);
+
+ yuv1 = fn(yuv1);
+ yuv2 = fn(yuv2);
+ yuv3 = fn(yuv3);
+ yuv4 = fn(yuv4);
+
+ Color new_uv = (yuv1 + yuv2 + yuv3 + yuv4) / 4.0f;
+
+ size_t pixel_y1_idx = x_chroma * 2 + y_chroma * 2 * image->width;
+ size_t pixel_y2_idx = (x_chroma * 2 + 1) + y_chroma * 2 * image->width;
+ size_t pixel_y3_idx = x_chroma * 2 + (y_chroma * 2 + 1) * image->width;
+ size_t pixel_y4_idx = (x_chroma * 2 + 1) + (y_chroma * 2 + 1) * image->width;
+
+ uint8_t& y1_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y1_idx];
+ uint8_t& y2_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y2_idx];
+ uint8_t& y3_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y3_idx];
+ uint8_t& y4_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y4_idx];
+
+ size_t pixel_count = image->width * image->height;
+ size_t pixel_uv_idx = x_chroma + y_chroma * (image->width / 2);
+
+ uint8_t& u_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count + pixel_uv_idx];
+ uint8_t& v_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count * 5 / 4 + pixel_uv_idx];
+
+ y1_uint = static_cast<uint8_t>(floor(yuv1.y * 255.0f + 0.5f));
+ y2_uint = static_cast<uint8_t>(floor(yuv2.y * 255.0f + 0.5f));
+ y3_uint = static_cast<uint8_t>(floor(yuv3.y * 255.0f + 0.5f));
+ y4_uint = static_cast<uint8_t>(floor(yuv4.y * 255.0f + 0.5f));
+
+ u_uint = static_cast<uint8_t>(floor(new_uv.u * 255.0f + 128.0f + 0.5f));
+ v_uint = static_cast<uint8_t>(floor(new_uv.v * 255.0f + 128.0f + 0.5f));
+}
////////////////////////////////////////////////////////////////////////////////
// Gain map calculations
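The G-channel inversion encoded by the new kSrgbGCb/kSrgbGCr and kP3GCb/kP3GCr constants above (the "a bit more complicated" derivation referenced in the bt2100YuvToRgb comment) is the same for any gamut whose luma coefficients sum to 1. A worked version with generic luma coefficients k_R, k_G, k_B and colour-difference scales c_B, c_R:

\[
\begin{aligned}
Y' &= k_R R' + k_G G' + k_B B', \qquad U = \frac{B' - Y'}{c_B}, \qquad V = \frac{R' - Y'}{c_R},\\
R' &= Y' + c_R V, \qquad B' = Y' + c_B U,\\
k_G G' &= Y' - k_R R' - k_B B' = (1 - k_R - k_B)\,Y' - k_B c_B U - k_R c_R V = k_G Y' - k_B c_B U - k_R c_R V,\\
G' &= Y' - \frac{k_B c_B}{k_G}\,U - \frac{k_R c_R}{k_G}\,V.
\end{aligned}
\]

Substituting the BT.709 values gives kSrgbGCb = 0.0722 * 1.8556 / 0.7152 ≈ 0.1873 and kSrgbGCr = 0.2126 * 1.5748 / 0.7152 ≈ 0.4681, which is what srgbYuvToRgb above computes.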
diff --git a/libs/ultrahdr/icc.cpp b/libs/ultrahdr/icc.cpp
index 32d08aa525..1ab3c7c793 100644
--- a/libs/ultrahdr/icc.cpp
+++ b/libs/ultrahdr/icc.cpp
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+#ifndef USE_BIG_ENDIAN
+#define USE_BIG_ENDIAN true
+#endif
+
#include <ultrahdr/icc.h>
#include <ultrahdr/gainmapmath.h>
#include <vector>
@@ -540,13 +544,21 @@ sp<DataStruct> IccHelper::writeIccProfile(ultrahdr_transfer_function tf,
size_t tag_table_size = kICCTagTableEntrySize * tags.size();
size_t profile_size = kICCHeaderSize + tag_table_size + tag_data_size;
+ sp<DataStruct> dataStruct = sp<DataStruct>::make(profile_size + kICCIdentifierSize);
+
+ // Write identifier, chunk count, and chunk ID
+ if (!dataStruct->write(kICCIdentifier, sizeof(kICCIdentifier)) ||
+ !dataStruct->write8(1) || !dataStruct->write8(1)) {
+ ALOGE("writeIccProfile(): error in identifier");
+ return dataStruct;
+ }
+
// Write the header.
header.data_color_space = Endian_SwapBE32(Signature_RGB);
header.pcs = Endian_SwapBE32(tf == ULTRAHDR_TF_PQ ? Signature_Lab : Signature_XYZ);
header.size = Endian_SwapBE32(profile_size);
header.tag_count = Endian_SwapBE32(tags.size());
- sp<DataStruct> dataStruct = sp<DataStruct>::make(profile_size);
if (!dataStruct->write(&header, sizeof(header))) {
ALOGE("writeIccProfile(): error in header");
return dataStruct;
@@ -582,4 +594,84 @@ sp<DataStruct> IccHelper::writeIccProfile(ultrahdr_transfer_function tf,
return dataStruct;
}
-} // namespace android::ultrahdr
\ No newline at end of file
+bool IccHelper::tagsEqualToMatrix(const Matrix3x3& matrix,
+ const uint8_t* red_tag,
+ const uint8_t* green_tag,
+ const uint8_t* blue_tag) {
+ sp<DataStruct> red_tag_test = write_xyz_tag(matrix.vals[0][0], matrix.vals[1][0],
+ matrix.vals[2][0]);
+ sp<DataStruct> green_tag_test = write_xyz_tag(matrix.vals[0][1], matrix.vals[1][1],
+ matrix.vals[2][1]);
+ sp<DataStruct> blue_tag_test = write_xyz_tag(matrix.vals[0][2], matrix.vals[1][2],
+ matrix.vals[2][2]);
+ return memcmp(red_tag, red_tag_test->getData(), kColorantTagSize) == 0 &&
+ memcmp(green_tag, green_tag_test->getData(), kColorantTagSize) == 0 &&
+ memcmp(blue_tag, blue_tag_test->getData(), kColorantTagSize) == 0;
+}
+
+ultrahdr_color_gamut IccHelper::readIccColorGamut(void* icc_data, size_t icc_size) {
+ // Each tag table entry consists of 3 fields of 4 bytes each.
+ static const size_t kTagTableEntrySize = 12;
+
+ if (icc_data == nullptr || icc_size < sizeof(ICCHeader) + kICCIdentifierSize) {
+ return ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+ }
+
+ if (memcmp(icc_data, kICCIdentifier, sizeof(kICCIdentifier)) != 0) {
+ return ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+ }
+
+ uint8_t* icc_bytes = reinterpret_cast<uint8_t*>(icc_data) + kICCIdentifierSize;
+
+ ICCHeader* header = reinterpret_cast<ICCHeader*>(icc_bytes);
+
+ // Use 0 to indicate not found, since offsets are always relative to start
+ // of ICC data and therefore a tag offset of zero would never be valid.
+ size_t red_primary_offset = 0, green_primary_offset = 0, blue_primary_offset = 0;
+ size_t red_primary_size = 0, green_primary_size = 0, blue_primary_size = 0;
+ for (size_t tag_idx = 0; tag_idx < Endian_SwapBE32(header->tag_count); ++tag_idx) {
+ uint32_t* tag_entry_start = reinterpret_cast<uint32_t*>(
+ icc_bytes + sizeof(ICCHeader) + tag_idx * kTagTableEntrySize);
+ // first 4 bytes are the tag signature, next 4 bytes are the tag offset,
+ // last 4 bytes are the tag length in bytes.
+ if (red_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_rXYZ)) {
+ red_primary_offset = Endian_SwapBE32(*(tag_entry_start+1));
+ red_primary_size = Endian_SwapBE32(*(tag_entry_start+2));
+ } else if (green_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_gXYZ)) {
+ green_primary_offset = Endian_SwapBE32(*(tag_entry_start+1));
+ green_primary_size = Endian_SwapBE32(*(tag_entry_start+2));
+ } else if (blue_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_bXYZ)) {
+ blue_primary_offset = Endian_SwapBE32(*(tag_entry_start+1));
+ blue_primary_size = Endian_SwapBE32(*(tag_entry_start+2));
+ }
+ }
+
+ if (red_primary_offset == 0 || red_primary_size != kColorantTagSize ||
+ kICCIdentifierSize + red_primary_offset + red_primary_size > icc_size ||
+ green_primary_offset == 0 || green_primary_size != kColorantTagSize ||
+ kICCIdentifierSize + green_primary_offset + green_primary_size > icc_size ||
+ blue_primary_offset == 0 || blue_primary_size != kColorantTagSize ||
+ kICCIdentifierSize + blue_primary_offset + blue_primary_size > icc_size) {
+ return ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+ }
+
+ uint8_t* red_tag = icc_bytes + red_primary_offset;
+ uint8_t* green_tag = icc_bytes + green_primary_offset;
+ uint8_t* blue_tag = icc_bytes + blue_primary_offset;
+
+ // Serialize tags as we do on encode and compare what we find to that to
+ // determine the gamut (since we don't have a need yet for full deserialize).
+ if (tagsEqualToMatrix(kSRGB, red_tag, green_tag, blue_tag)) {
+ return ULTRAHDR_COLORGAMUT_BT709;
+ } else if (tagsEqualToMatrix(kDisplayP3, red_tag, green_tag, blue_tag)) {
+ return ULTRAHDR_COLORGAMUT_P3;
+ } else if (tagsEqualToMatrix(kRec2020, red_tag, green_tag, blue_tag)) {
+ return ULTRAHDR_COLORGAMUT_BT2100;
+ }
+
+ // Didn't find a match to one of the profiles we write; indicate the gamut
+ // is unspecified since we don't understand it.
+ return ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+}
+
+} // namespace android::ultrahdr
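Taken together with the decoder changes further down, the new readIccColorGamut gives the decode path a way to recover the base image's gamut from the embedded ICC profile. A hedged usage sketch; the wrapper name, include paths, and exact call signatures are assumptions based on the headers in this change:

#include <ultrahdr/icc.h>
#include <ultrahdr/jpegdecoderhelper.h>

using namespace android::ultrahdr;

ultrahdr_color_gamut gamutOfDecodedJpeg(const void* jpeg_data, int jpeg_length) {
  JpegDecoderHelper decoder;
  if (!decoder.decompressImage(jpeg_data, jpeg_length)) {
    return ULTRAHDR_COLORGAMUT_UNSPECIFIED;
  }
  // readIccColorGamut() only recognizes the sRGB, Display-P3, and Rec.2020
  // primaries that IccHelper itself writes; anything else maps to UNSPECIFIED.
  return IccHelper::readIccColorGamut(decoder.getICCPtr(), decoder.getICCSize());
}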
diff --git a/libs/ultrahdr/include/ultrahdr/gainmapmath.h b/libs/ultrahdr/include/ultrahdr/gainmapmath.h
index abc93567f2..13832db752 100644
--- a/libs/ultrahdr/include/ultrahdr/gainmapmath.h
+++ b/libs/ultrahdr/include/ultrahdr/gainmapmath.h
@@ -218,24 +218,30 @@ struct ShepardsIDW {
// except for those concerning transfer functions.
/*
- * Calculate the luminance of a linear RGB sRGB pixel, according to IEC 61966-2-1.
+ * Calculate the luminance of a linear RGB sRGB pixel, according to
+ * IEC 61966-2-1/Amd 1:2003.
*
* [0.0, 1.0] range in and out.
*/
float srgbLuminance(Color e);
/*
- * Convert from OETF'd srgb YUV to RGB, according to ECMA TR/98.
+ * Convert from OETF'd srgb RGB to YUV, according to ITU-R BT.709-6.
+ *
+ * BT.709 YUV<->RGB matrix is used to match expectations for DataSpace.
*/
-Color srgbYuvToRgb(Color e_gamma);
+Color srgbRgbToYuv(Color e_gamma);
+
/*
- * Convert from OETF'd srgb RGB to YUV, according to ECMA TR/98.
+ * Convert from OETF'd srgb YUV to RGB, according to ITU-R BT.709-6.
+ *
+ * BT.709 YUV<->RGB matrix is used to match expectations for DataSpace.
*/
-Color srgbRgbToYuv(Color e_gamma);
+Color srgbYuvToRgb(Color e_gamma);
/*
- * Convert from srgb to linear, according to IEC 61966-2-1.
+ * Convert from srgb to linear, according to IEC 61966-2-1/Amd 1:2003.
*
* [0.0, 1.0] range in and out.
*/
@@ -257,6 +263,20 @@ constexpr size_t kSrgbInvOETFNumEntries = 1 << kSrgbInvOETFPrecision;
*/
float p3Luminance(Color e);
+/*
+ * Convert from OETF'd P3 RGB to YUV, according to ITU-R BT.601-7.
+ *
+ * BT.601 YUV<->RGB matrix is used to match expectations for DataSpace.
+ */
+Color p3RgbToYuv(Color e_gamma);
+
+/*
+ * Convert from OETF'd P3 YUV to RGB, according to ITU-R BT.601-7.
+ *
+ * BT.601 YUV<->RGB matrix is used to match expectations for DataSpace.
+ */
+Color p3YuvToRgb(Color e_gamma);
+
////////////////////////////////////////////////////////////////////////////////
// BT.2100 transformations - according to ITU-R BT.2100-2
@@ -269,12 +289,16 @@ float p3Luminance(Color e);
float bt2100Luminance(Color e);
/*
- * Convert from OETF'd BT.2100 RGB to YUV.
+ * Convert from OETF'd BT.2100 RGB to YUV, according to ITU-R BT.2100-2.
+ *
+ * BT.2100 YUV<->RGB matrix is used to match expectations for DataSpace.
*/
Color bt2100RgbToYuv(Color e_gamma);
/*
- * Convert from OETF'd BT.2100 YUV to RGB.
+ * Convert from OETF'd BT.2100 YUV to RGB, according to ITU-R BT.2100-2.
+ *
+ * BT.2100 YUV<->RGB matrix is used to match expectations for DataSpace.
*/
Color bt2100YuvToRgb(Color e_gamma);
@@ -358,6 +382,31 @@ inline Color identityConversion(Color e) { return e; }
*/
ColorTransformFn getHdrConversionFn(ultrahdr_color_gamut sdr_gamut, ultrahdr_color_gamut hdr_gamut);
+/*
+ * Convert between YUV encodings, according to ITU-R BT.709-6, ITU-R BT.601-7, and ITU-R BT.2100-2.
+ *
+ * Bt.709 and Bt.2100 have well-defined YUV encodings; Display-P3's is less well defined, but is
+ * treated as Bt.601 by DataSpace, hence we do the same.
+ */
+Color yuv709To601(Color e_gamma);
+Color yuv709To2100(Color e_gamma);
+Color yuv601To709(Color e_gamma);
+Color yuv601To2100(Color e_gamma);
+Color yuv2100To709(Color e_gamma);
+Color yuv2100To601(Color e_gamma);
+
+/*
+ * Performs a transformation at the chroma x and y coordinates provided on a YUV420 image.
+ *
+ * Applies the transformation by computing the transformed YUV for each of the 4 Y samples paired
+ * with the single shared UV sample; each Y keeps its corresponding result, and UV takes the
+ * average of the four results.
+ *
+ * x_chroma and y_chroma must be less than half the image's width and height respectively, since
+ * the input is 4:2:0 subsampled.
+ */
+void transformYuv420(jr_uncompressed_ptr image, size_t x_chroma, size_t y_chroma,
+ ColorTransformFn fn);
+
////////////////////////////////////////////////////////////////////////////////
// Gain map calculations
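A minimal sketch of how the new conversion helpers are meant to compose (it mirrors the JpegR::convertYuv loop added in jpegr.cpp below); the wrapper name is illustrative only and the include path is assumed from this change:

#include <cstddef>
#include <ultrahdr/gainmapmath.h>

using namespace android::ultrahdr;

void convert709To601InPlace(jr_uncompressed_ptr image) {
  // One chroma sample covers a 2x2 block of luma samples, so the chroma grid
  // is half the image size in each dimension; transformYuv420 rewrites the
  // 2x2 luma block and the shared chroma pair in place.
  for (size_t y = 0; y < image->height / 2; ++y) {
    for (size_t x = 0; x < image->width / 2; ++x) {
      transformYuv420(image, x, y, yuv709To601);
    }
  }
}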
diff --git a/libs/ultrahdr/include/ultrahdr/icc.h b/libs/ultrahdr/include/ultrahdr/icc.h
index 7f6ab882c6..7f047f8f5b 100644
--- a/libs/ultrahdr/include/ultrahdr/icc.h
+++ b/libs/ultrahdr/include/ultrahdr/icc.h
@@ -56,12 +56,16 @@ enum {
Signature_XYZ = 0x58595A20,
};
-
typedef uint32_t FourByteTag;
static inline constexpr FourByteTag SetFourByteTag(char a, char b, char c, char d) {
return (((uint32_t)a << 24) | ((uint32_t)b << 16) | ((uint32_t)c << 8) | (uint32_t)d);
}
+static constexpr char kICCIdentifier[] = "ICC_PROFILE";
+// 12 for the actual identifier, +2 for the chunk count and chunk index which
+// will always follow.
+static constexpr size_t kICCIdentifierSize = 14;
+
// This is equal to the header size according to the ICC specification (128)
// plus the size of the tag count (4). We include the tag count since we
// always require it to be present anyway.
@@ -70,6 +74,10 @@ static constexpr size_t kICCHeaderSize = 132;
// Contains a signature (4), offset (4), and size (4).
static constexpr size_t kICCTagTableEntrySize = 12;
+// size should be 20; 4 bytes for type descriptor, 4 bytes reserved, 12
+// bytes for a single XYZ number type (4 bytes per coordinate).
+static constexpr size_t kColorantTagSize = 20;
+
static constexpr uint32_t kDisplay_Profile = SetFourByteTag('m', 'n', 't', 'r');
static constexpr uint32_t kRGB_ColorSpace = SetFourByteTag('R', 'G', 'B', ' ');
static constexpr uint32_t kXYZ_PCSSpace = SetFourByteTag('X', 'Y', 'Z', ' ');
@@ -225,10 +233,23 @@ private:
static void compute_lut_entry(const Matrix3x3& src_to_XYZD50, float rgb[3]);
static sp<DataStruct> write_clut(const uint8_t* grid_points, const uint8_t* grid_16);
+ // Checks if a set of xyz tags is equivalent to a 3x3 Matrix. Each input
+  // tag buffer is assumed to be at least kColorantTagSize in size.
+ static bool tagsEqualToMatrix(const Matrix3x3& matrix,
+ const uint8_t* red_tag,
+ const uint8_t* green_tag,
+ const uint8_t* blue_tag);
+
public:
+ // Output includes JPEG embedding identifier and chunk information, but not
+ // APPx information.
static sp<DataStruct> writeIccProfile(const ultrahdr_transfer_function tf,
const ultrahdr_color_gamut gamut);
+ // NOTE: this function is not robust; it can infer gamuts that IccHelper
+ // writes out but should not be considered a reference implementation for
+ // robust parsing of ICC profiles or their gamuts.
+ static ultrahdr_color_gamut readIccColorGamut(void* icc_data, size_t icc_size);
};
} // namespace android::ultrahdr
-#endif //ANDROID_ULTRAHDR_ICC_H
\ No newline at end of file
+#endif //ANDROID_ULTRAHDR_ICC_H
diff --git a/libs/ultrahdr/include/ultrahdr/jpegdecoderhelper.h b/libs/ultrahdr/include/ultrahdr/jpegdecoderhelper.h
index 4f2b7423c8..8b5499a2c0 100644
--- a/libs/ultrahdr/include/ultrahdr/jpegdecoderhelper.h
+++ b/libs/ultrahdr/include/ultrahdr/jpegdecoderhelper.h
@@ -83,11 +83,14 @@ public:
*/
size_t getEXIFSize();
/*
- * Returns the position offset of EXIF package
- * (4 bypes offset to FF sign, the byte after FF E1 XX XX <this byte>),
- * or -1 if no EXIF exists.
+ * Returns the ICC data from the image.
*/
- int getEXIFPos() { return mExifPos; }
+ void* getICCPtr();
+ /*
+ * Returns the decompressed ICC buffer size. This method must be called only after
+ * calling decompressImage() or getCompressedImageParameters().
+ */
+ size_t getICCSize();
/*
* Decompresses metadata of the image. All vectors are owned by the caller.
*/
@@ -112,12 +115,12 @@ private:
std::vector<JOCTET> mXMPBuffer;
// The buffer that holds EXIF Data.
std::vector<JOCTET> mEXIFBuffer;
+ // The buffer that holds ICC Data.
+ std::vector<JOCTET> mICCBuffer;
// Resolution of the decompressed image.
size_t mWidth;
size_t mHeight;
- // Position of EXIF package, default value is -1 which means no EXIF package appears.
- size_t mExifPos;
};
} /* namespace android::ultrahdr */
diff --git a/libs/ultrahdr/include/ultrahdr/jpegr.h b/libs/ultrahdr/include/ultrahdr/jpegr.h
index 1f9bd0f930..9546ca4762 100644
--- a/libs/ultrahdr/include/ultrahdr/jpegr.h
+++ b/libs/ultrahdr/include/ultrahdr/jpegr.h
@@ -125,7 +125,7 @@ public:
*
* Generate gain map from the HDR and SDR inputs, compress SDR YUV to 8-bit JPEG and append
* the gain map to the end of the compressed JPEG. HDR and SDR inputs must be the same
- * resolution.
+ * resolution. SDR input is assumed to use the sRGB transfer function.
* @param uncompressed_p010_image uncompressed HDR image in P010 color format
* @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
* @param hdr_tf transfer function of the HDR image
@@ -152,7 +152,9 @@ public:
* This method requires HAL Hardware JPEG encoder.
*
* Generate gain map from the HDR and SDR inputs, append the gain map to the end of the
- * compressed JPEG. HDR and SDR inputs must be the same resolution and color space.
+ * compressed JPEG. Adds an ICC profile if one isn't present in the input JPEG image. HDR and
+ * SDR inputs must be the same resolution and color space. SDR image is assumed to use the sRGB
+ * transfer function.
* @param uncompressed_p010_image uncompressed HDR image in P010 color format
* @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
* Note: the SDR image must be the decoded version of the JPEG
@@ -178,8 +180,9 @@ public:
* This method requires HAL Hardware JPEG encoder.
*
* Decode the compressed 8-bit JPEG image to YUV SDR, generate gain map from the HDR input
- * and the decoded SDR result, append the gain map to the end of the compressed JPEG. HDR
- * and SDR inputs must be the same resolution.
+ * and the decoded SDR result, append the gain map to the end of the compressed JPEG. Adds an
+ * ICC profile if one isn't present in the input JPEG image. HDR and SDR inputs must be the same
+ * resolution. JPEG image is assumed to use the sRGB transfer function.
* @param uncompressed_p010_image uncompressed HDR image in P010 color format
* @param compressed_jpeg_image compressed 8-bit JPEG image
* @param hdr_tf transfer function of the HDR image
@@ -198,7 +201,8 @@ public:
* Encode API-4
* Assemble JPEGR image from SDR JPEG and gainmap JPEG.
*
- * Assemble the primary JPEG image, the gain map and the metadata to JPEG/R format.
+ * Assemble the primary JPEG image, the gain map and the metadata to JPEG/R format. Adds an ICC
+ * profile if one isn't present in the input JPEG image.
* @param compressed_jpeg_image compressed 8-bit JPEG image
* @param compressed_gainmap compressed 8-bit JPEG single channel image
* @param metadata metadata to be written in XMP of the primary jpeg
@@ -217,6 +221,9 @@ public:
* Decode API
* Decompress JPEGR image.
*
+ * This method assumes that the JPEGR image contains an ICC profile with primaries that match
+ * those of a color gamut that this library is aware of: Bt.709, Display-P3, or Bt.2100.
+ *
* @param compressed_jpegr_image compressed JPEGR image.
* @param dest destination of the uncompressed JPEGR image.
* @param max_display_boost (optional) the maximum available boost supported by a display,
@@ -270,26 +277,30 @@ protected:
/*
* This method is called in the encoding pipeline. It will take the uncompressed 8-bit and
* 10-bit yuv images as input, and calculate the uncompressed gain map. The input images
- * must be the same resolution.
+ * must be the same resolution. The SDR input is assumed to use the sRGB transfer function.
*
* @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
* @param uncompressed_p010_image uncompressed HDR image in P010 color format
* @param hdr_tf transfer function of the HDR image
* @param dest gain map; caller responsible for memory of data
* @param metadata max_content_boost is filled in
+ * @param sdr_is_601 if true, then use BT.601 decoding of YUV regardless of SDR image gamut
* @return NO_ERROR if calculation succeeds, error code if error occurs.
*/
status_t generateGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
jr_uncompressed_ptr uncompressed_p010_image,
ultrahdr_transfer_function hdr_tf,
ultrahdr_metadata_ptr metadata,
- jr_uncompressed_ptr dest);
+ jr_uncompressed_ptr dest,
+ bool sdr_is_601 = false);
/*
* This method is called in the decoding pipeline. It will take the uncompressed (decoded)
* 8-bit yuv image, the uncompressed (decoded) gain map, and extracted JPEG/R metadata as
* input, and calculate the 10-bit recovered image. The recovered output image is the same
* color gamut as the SDR image, with HLG transfer function, and is in RGBA1010102 data format.
+ * The SDR image is assumed to use the sRGB transfer function. The SDR image is also assumed to
+ * be a decoded JPEG for the purpose of YUV interpretation.
*
* @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
* @param uncompressed_gain_map uncompressed gain map
@@ -353,6 +364,8 @@ private:
* @param compressed_jpeg_image compressed 8-bit JPEG image
* @param compress_gain_map compressed recover map
* @param (nullable) exif EXIF package
+ * @param (nullable) icc ICC package
+ * @param icc_size length in bytes of ICC package
* @param metadata JPEG/R metadata to encode in XMP of the jpeg
* @param dest compressed JPEGR image
* @return NO_ERROR if calculation succeeds, error code if error occurs.
@@ -360,6 +373,7 @@ private:
status_t appendGainMap(jr_compressed_ptr compressed_jpeg_image,
jr_compressed_ptr compressed_gain_map,
jr_exif_ptr exif,
+ void* icc, size_t icc_size,
ultrahdr_metadata_ptr metadata,
jr_compressed_ptr dest);
@@ -374,6 +388,22 @@ private:
jr_uncompressed_ptr dest);
/*
+ * This method will convert a YUV420 image from one YUV encoding to another in-place (eg.
+ * Bt.709 to Bt.601 YUV encoding).
+ *
+ * src_encoding and dest_encoding indicate the encoding via the YUV conversion defined for that
+ * gamut. P3 indicates Rec.601, since this is how DataSpace encodes Display-P3 YUV data.
+ *
+ * @param image the YUV420 image to convert
+ * @param src_encoding input YUV encoding
+ * @param dest_encoding output YUV encoding
+ * @return NO_ERROR if calculation succeeds, error code if error occurs.
+ */
+ status_t convertYuv(jr_uncompressed_ptr image,
+ ultrahdr_color_gamut src_encoding,
+ ultrahdr_color_gamut dest_encoding);
+
+ /*
* This method will check the validity of the input arguments.
*
* @param uncompressed_p010_image uncompressed HDR image in P010 color format
diff --git a/libs/ultrahdr/jpegdecoderhelper.cpp b/libs/ultrahdr/jpegdecoderhelper.cpp
index 0bad4a4de0..fef544452a 100644
--- a/libs/ultrahdr/jpegdecoderhelper.cpp
+++ b/libs/ultrahdr/jpegdecoderhelper.cpp
@@ -93,7 +93,6 @@ static void jpegrerror_exit(j_common_ptr cinfo) {
}
JpegDecoderHelper::JpegDecoderHelper() {
- mExifPos = 0;
}
JpegDecoderHelper::~JpegDecoderHelper() {
@@ -138,6 +137,14 @@ size_t JpegDecoderHelper::getEXIFSize() {
return mEXIFBuffer.size();
}
+void* JpegDecoderHelper::getICCPtr() {
+ return mICCBuffer.data();
+}
+
+size_t JpegDecoderHelper::getICCSize() {
+ return mICCBuffer.size();
+}
+
size_t JpegDecoderHelper::getDecompressedImageWidth() {
return mWidth;
}
@@ -168,31 +175,21 @@ bool JpegDecoderHelper::decode(const void* image, int length, bool decodeToRGBA)
cinfo.src = &mgr;
jpeg_read_header(&cinfo, TRUE);
- // Save XMP data and EXIF data.
- // Here we only handle the first XMP / EXIF package.
- // The parameter pos is used for capturing start offset of EXIF, which is hacky, but working...
+ // Save XMP data, EXIF data, and ICC data.
+ // Here we only handle the first XMP / EXIF / ICC package.
// We assume that all packages are starting with two bytes marker (eg FF E1 for EXIF package),
// two bytes of package length which is stored in marker->original_length, and the real data
- // which is stored in marker->data. The pos is adding up all previous package lengths (
- // 4 bytes marker and length, marker->original_length) before EXIF appears. Note that here we
- // we are using marker->original_length instead of marker->data_length because in case the real
- // package length is larger than the limitation, jpeg-turbo will only copy the data within the
- // limitation (represented by data_length) and this may vary from original_length / real offset.
- // A better solution is making jpeg_marker_struct holding the offset, but currently it doesn't.
+ // which is stored in marker->data.
bool exifAppears = false;
bool xmpAppears = false;
- size_t pos = 2; // position after SOI
+ bool iccAppears = false;
for (jpeg_marker_struct* marker = cinfo.marker_list;
- marker && !(exifAppears && xmpAppears);
+ marker && !(exifAppears && xmpAppears && iccAppears);
marker = marker->next) {
- pos += 4;
- pos += marker->original_length;
-
- if (marker->marker != kAPP1Marker) {
+ if (marker->marker != kAPP1Marker && marker->marker != kAPP2Marker) {
continue;
}
-
const unsigned int len = marker->data_length;
if (!xmpAppears &&
len > kXmpNameSpace.size() &&
@@ -210,7 +207,12 @@ bool JpegDecoderHelper::decode(const void* image, int length, bool decodeToRGBA)
mEXIFBuffer.resize(len, 0);
memcpy(static_cast<void*>(mEXIFBuffer.data()), marker->data, len);
exifAppears = true;
- mExifPos = pos - marker->original_length;
+ } else if (!iccAppears &&
+ len > sizeof(kICCSig) &&
+ !memcmp(marker->data, kICCSig, sizeof(kICCSig))) {
+ mICCBuffer.resize(len, 0);
+ memcpy(static_cast<void*>(mICCBuffer.data()), marker->data, len);
+ iccAppears = true;
}
}
@@ -228,6 +230,7 @@ bool JpegDecoderHelper::decode(const void* image, int length, bool decodeToRGBA)
if (cinfo.jpeg_color_space == JCS_GRAYSCALE) {
// We don't intend to support decoding grayscale to RGBA
status = false;
+ ALOGE("%s: decoding grayscale to RGBA is unsupported", __func__);
goto CleanUp;
}
// 4 bytes per pixel
@@ -242,6 +245,7 @@ bool JpegDecoderHelper::decode(const void* image, int length, bool decodeToRGBA)
cinfo.comp_info[1].v_samp_factor != 1 ||
cinfo.comp_info[2].v_samp_factor != 1) {
status = false;
+ ALOGE("%s: decoding to YUV only supports 4:2:0 subsampling", __func__);
goto CleanUp;
}
mResultBuffer.resize(cinfo.image_width * cinfo.image_height * 3 / 2, 0);
@@ -304,8 +308,12 @@ bool JpegDecoderHelper::getCompressedImageParameters(const void* image, int leng
return false;
}
- *pWidth = cinfo.image_width;
- *pHeight = cinfo.image_height;
+ if (pWidth != nullptr) {
+ *pWidth = cinfo.image_width;
+ }
+ if (pHeight != nullptr) {
+ *pHeight = cinfo.image_height;
+ }
if (iccData != nullptr) {
for (jpeg_marker_struct* marker = cinfo.marker_list; marker;
@@ -318,9 +326,7 @@ bool JpegDecoderHelper::getCompressedImageParameters(const void* image, int leng
continue;
}
- const unsigned int len = marker->data_length - kICCMarkerHeaderSize;
- const uint8_t *src = marker->data + kICCMarkerHeaderSize;
- iccData->insert(iccData->end(), src, src+len);
+ iccData->insert(iccData->end(), marker->data, marker->data + marker->data_length);
}
}
diff --git a/libs/ultrahdr/jpegr.cpp b/libs/ultrahdr/jpegr.cpp
index 415255d4ea..9af5af75e5 100644
--- a/libs/ultrahdr/jpegr.cpp
+++ b/libs/ultrahdr/jpegr.cpp
@@ -258,6 +258,10 @@ status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
uncompressed_yuv_420_image.colorGamut);
+ // Convert to Bt601 YUV encoding for JPEG encode
+ JPEGR_CHECK(convertYuv(&uncompressed_yuv_420_image, uncompressed_yuv_420_image.colorGamut,
+ ULTRAHDR_COLORGAMUT_P3));
+
JpegEncoderHelper jpeg_encoder;
if (!jpeg_encoder.compressImage(uncompressed_yuv_420_image.data,
uncompressed_yuv_420_image.width,
@@ -269,7 +273,9 @@ status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
jpeg.data = jpeg_encoder.getCompressedImagePtr();
jpeg.length = jpeg_encoder.getCompressedImageSize();
- JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, &metadata, dest));
+ // No ICC since JPEG encode already did it
+ JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, /* icc */ nullptr, /* icc size */ 0,
+ &metadata, dest));
return NO_ERROR;
}
@@ -317,10 +323,22 @@ status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
uncompressed_yuv_420_image->colorGamut);
+  // Convert to Bt601 YUV encoding for JPEG encode; make a copy so as to not clobber client data
+ unique_ptr<uint8_t[]> yuv_420_bt601_data = make_unique<uint8_t[]>(
+ uncompressed_yuv_420_image->width * uncompressed_yuv_420_image->height * 3 / 2);
+ memcpy(yuv_420_bt601_data.get(), uncompressed_yuv_420_image->data,
+ uncompressed_yuv_420_image->width * uncompressed_yuv_420_image->height * 3 / 2);
+
+ jpegr_uncompressed_struct yuv_420_bt601_image = {
+ yuv_420_bt601_data.get(), uncompressed_yuv_420_image->width, uncompressed_yuv_420_image->height,
+ uncompressed_yuv_420_image->colorGamut };
+ JPEGR_CHECK(convertYuv(&yuv_420_bt601_image, yuv_420_bt601_image.colorGamut,
+ ULTRAHDR_COLORGAMUT_P3));
+
JpegEncoderHelper jpeg_encoder;
- if (!jpeg_encoder.compressImage(uncompressed_yuv_420_image->data,
- uncompressed_yuv_420_image->width,
- uncompressed_yuv_420_image->height, quality,
+ if (!jpeg_encoder.compressImage(yuv_420_bt601_image.data,
+ yuv_420_bt601_image.width,
+ yuv_420_bt601_image.height, quality,
icc->getData(), icc->getLength())) {
return ERROR_JPEGR_ENCODE_ERROR;
}
@@ -328,7 +346,9 @@ status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
jpeg.data = jpeg_encoder.getCompressedImagePtr();
jpeg.length = jpeg_encoder.getCompressedImageSize();
- JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, &metadata, dest));
+ // No ICC since jpeg encode already did it
+ JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, /* icc */ nullptr, /* icc size */ 0,
+ &metadata, dest));
return NO_ERROR;
}
@@ -371,7 +391,24 @@ status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
compressed_map.data = jpeg_encoder_gainmap.getCompressedImagePtr();
compressed_map.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
- JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, nullptr, &metadata, dest));
+ // We just want to check if ICC is present, so don't do a full decode. Note,
+ // this doesn't verify that the ICC is valid.
+ JpegDecoderHelper decoder;
+ std::vector<uint8_t> icc;
+ decoder.getCompressedImageParameters(compressed_jpeg_image->data, compressed_jpeg_image->length,
+ /* pWidth */ nullptr, /* pHeight */ nullptr,
+ &icc, /* exifData */ nullptr);
+
+ // Add ICC if not already present.
+ if (icc.size() > 0) {
+ JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
+ /* icc */ nullptr, /* icc size */ 0, &metadata, dest));
+ } else {
+ sp<DataStruct> newIcc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+ uncompressed_yuv_420_image->colorGamut);
+ JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
+ newIcc->getData(), newIcc->getLength(), &metadata, dest));
+ }
return NO_ERROR;
}
@@ -392,6 +429,7 @@ status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
return ret;
}
+ // Note: output is Bt.601 YUV encoded regardless of gamut, due to jpeg decode.
JpegDecoderHelper jpeg_decoder;
if (!jpeg_decoder.decompressImage(compressed_jpeg_image->data, compressed_jpeg_image->length)) {
return ERROR_JPEGR_DECODE_ERROR;
@@ -411,8 +449,10 @@ status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
metadata.version = kJpegrVersion;
jpegr_uncompressed_struct map;
+ // Indicate that the SDR image is Bt.601 YUV encoded.
JPEGR_CHECK(generateGainMap(
- &uncompressed_yuv_420_image, uncompressed_p010_image, hdr_tf, &metadata, &map));
+ &uncompressed_yuv_420_image, uncompressed_p010_image, hdr_tf, &metadata, &map,
+ true /* sdr_is_601 */ ));
std::unique_ptr<uint8_t[]> map_data;
map_data.reset(reinterpret_cast<uint8_t*>(map.data));
@@ -424,7 +464,24 @@ status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
compressed_map.data = jpeg_encoder_gainmap.getCompressedImagePtr();
compressed_map.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
- JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, nullptr, &metadata, dest));
+ // We just want to check if ICC is present, so don't do a full decode. Note,
+ // this doesn't verify that the ICC is valid.
+ JpegDecoderHelper decoder;
+ std::vector<uint8_t> icc;
+ decoder.getCompressedImageParameters(compressed_jpeg_image->data, compressed_jpeg_image->length,
+ /* pWidth */ nullptr, /* pHeight */ nullptr,
+ &icc, /* exifData */ nullptr);
+
+ // Add ICC if not already present.
+ if (icc.size() > 0) {
+ JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
+ /* icc */ nullptr, /* icc size */ 0, &metadata, dest));
+ } else {
+ sp<DataStruct> newIcc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+ uncompressed_yuv_420_image.colorGamut);
+ JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
+ newIcc->getData(), newIcc->getLength(), &metadata, dest));
+ }
return NO_ERROR;
}
@@ -449,8 +506,25 @@ status_t JpegR::encodeJPEGR(jr_compressed_ptr compressed_jpeg_image,
return ERROR_JPEGR_INVALID_NULL_PTR;
}
- JPEGR_CHECK(appendGainMap(compressed_jpeg_image, compressed_gainmap, /* exif */ nullptr,
- metadata, dest));
+ // We just want to check if ICC is present, so don't do a full decode. Note,
+ // this doesn't verify that the ICC is valid.
+ JpegDecoderHelper decoder;
+ std::vector<uint8_t> icc;
+ decoder.getCompressedImageParameters(compressed_jpeg_image->data, compressed_jpeg_image->length,
+ /* pWidth */ nullptr, /* pHeight */ nullptr,
+ &icc, /* exifData */ nullptr);
+
+ // Add ICC if not already present.
+ if (icc.size() > 0) {
+ JPEGR_CHECK(appendGainMap(compressed_jpeg_image, compressed_gainmap, /* exif */ nullptr,
+ /* icc */ nullptr, /* icc size */ 0, metadata, dest));
+ } else {
+ sp<DataStruct> newIcc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+ compressed_jpeg_image->colorGamut);
+ JPEGR_CHECK(appendGainMap(compressed_jpeg_image, compressed_gainmap, /* exif */ nullptr,
+ newIcc->getData(), newIcc->getLength(), metadata, dest));
+ }
+
return NO_ERROR;
}
@@ -613,6 +687,9 @@ status_t JpegR::decodeJPEGR(jr_compressed_ptr compressed_jpegr_image,
uncompressed_yuv_420_image.data = jpeg_decoder.getDecompressedImagePtr();
uncompressed_yuv_420_image.width = jpeg_decoder.getDecompressedImageWidth();
uncompressed_yuv_420_image.height = jpeg_decoder.getDecompressedImageHeight();
+ uncompressed_yuv_420_image.colorGamut = IccHelper::readIccColorGamut(
+ jpeg_decoder.getICCPtr(), jpeg_decoder.getICCSize());
+
JPEGR_CHECK(applyGainMap(&uncompressed_yuv_420_image, &map, &uhdr_metadata, output_format,
max_display_boost, dest));
return NO_ERROR;
@@ -624,6 +701,7 @@ status_t JpegR::compressGainMap(jr_uncompressed_ptr uncompressed_gain_map,
return ERROR_JPEGR_INVALID_NULL_PTR;
}
+ // Don't need to convert YUV to Bt601 since single channel
if (!jpeg_encoder->compressImage(uncompressed_gain_map->data,
uncompressed_gain_map->width,
uncompressed_gain_map->height,
@@ -699,7 +777,8 @@ status_t JpegR::generateGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
jr_uncompressed_ptr uncompressed_p010_image,
ultrahdr_transfer_function hdr_tf,
ultrahdr_metadata_ptr metadata,
- jr_uncompressed_ptr dest) {
+ jr_uncompressed_ptr dest,
+ bool sdr_is_601) {
if (uncompressed_yuv_420_image == nullptr
|| uncompressed_p010_image == nullptr
|| metadata == nullptr
@@ -768,15 +847,38 @@ status_t JpegR::generateGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
uncompressed_yuv_420_image->colorGamut, uncompressed_p010_image->colorGamut);
ColorCalculationFn luminanceFn = nullptr;
+ ColorTransformFn sdrYuvToRgbFn = nullptr;
switch (uncompressed_yuv_420_image->colorGamut) {
case ULTRAHDR_COLORGAMUT_BT709:
luminanceFn = srgbLuminance;
+ sdrYuvToRgbFn = srgbYuvToRgb;
break;
case ULTRAHDR_COLORGAMUT_P3:
luminanceFn = p3Luminance;
+ sdrYuvToRgbFn = p3YuvToRgb;
break;
case ULTRAHDR_COLORGAMUT_BT2100:
luminanceFn = bt2100Luminance;
+ sdrYuvToRgbFn = bt2100YuvToRgb;
+ break;
+ case ULTRAHDR_COLORGAMUT_UNSPECIFIED:
+ // Should be impossible to hit after input validation.
+ return ERROR_JPEGR_INVALID_COLORGAMUT;
+ }
+ if (sdr_is_601) {
+ sdrYuvToRgbFn = p3YuvToRgb;
+ }
+
+ ColorTransformFn hdrYuvToRgbFn = nullptr;
+ switch (uncompressed_p010_image->colorGamut) {
+ case ULTRAHDR_COLORGAMUT_BT709:
+ hdrYuvToRgbFn = srgbYuvToRgb;
+ break;
+ case ULTRAHDR_COLORGAMUT_P3:
+ hdrYuvToRgbFn = p3YuvToRgb;
+ break;
+ case ULTRAHDR_COLORGAMUT_BT2100:
+ hdrYuvToRgbFn = bt2100YuvToRgb;
break;
case ULTRAHDR_COLORGAMUT_UNSPECIFIED:
// Should be impossible to hit after input validation.
@@ -790,8 +892,8 @@ status_t JpegR::generateGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
std::function<void()> generateMap = [uncompressed_yuv_420_image, uncompressed_p010_image,
metadata, dest, hdrInvOetf, hdrGamutConversionFn,
- luminanceFn, hdr_white_nits, log2MinBoost, log2MaxBoost,
- &jobQueue]() -> void {
+ luminanceFn, sdrYuvToRgbFn, hdrYuvToRgbFn, hdr_white_nits,
+ log2MinBoost, log2MaxBoost, &jobQueue]() -> void {
size_t rowStart, rowEnd;
size_t dest_map_width = uncompressed_yuv_420_image->width / kMapDimensionScaleFactor;
size_t dest_map_stride = dest->width;
@@ -800,7 +902,8 @@ status_t JpegR::generateGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
for (size_t x = 0; x < dest_map_width; ++x) {
Color sdr_yuv_gamma =
sampleYuv420(uncompressed_yuv_420_image, kMapDimensionScaleFactor, x, y);
- Color sdr_rgb_gamma = srgbYuvToRgb(sdr_yuv_gamma);
+ Color sdr_rgb_gamma = sdrYuvToRgbFn(sdr_yuv_gamma);
+ // We are assuming the SDR input is always sRGB transfer.
#if USE_SRGB_INVOETF_LUT
Color sdr_rgb = srgbInvOetfLUT(sdr_rgb_gamma);
#else
@@ -809,7 +912,7 @@ status_t JpegR::generateGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
float sdr_y_nits = luminanceFn(sdr_rgb) * kSdrWhiteNits;
Color hdr_yuv_gamma = sampleP010(uncompressed_p010_image, kMapDimensionScaleFactor, x, y);
- Color hdr_rgb_gamma = bt2100YuvToRgb(hdr_yuv_gamma);
+ Color hdr_rgb_gamma = hdrYuvToRgbFn(hdr_yuv_gamma);
Color hdr_rgb = hdrInvOetf(hdr_rgb_gamma);
hdr_rgb = hdrGamutConversionFn(hdr_rgb);
float hdr_y_nits = luminanceFn(hdr_rgb) * hdr_white_nits;
@@ -887,7 +990,9 @@ status_t JpegR::applyGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
for (size_t y = rowStart; y < rowEnd; ++y) {
for (size_t x = 0; x < width; ++x) {
Color yuv_gamma_sdr = getYuv420Pixel(uncompressed_yuv_420_image, x, y);
- Color rgb_gamma_sdr = srgbYuvToRgb(yuv_gamma_sdr);
+ // Assuming the sdr image is a decoded JPEG, we should always use Rec.601 YUV coefficients
+ Color rgb_gamma_sdr = p3YuvToRgb(yuv_gamma_sdr);
+ // We are assuming the SDR base image is always sRGB transfer.
#if USE_SRGB_INVOETF_LUT
Color rgb_sdr = srgbInvOetfLUT(rgb_gamma_sdr);
#else
@@ -1065,6 +1170,7 @@ status_t JpegR::extractGainMap(jr_compressed_ptr compressed_jpegr_image,
status_t JpegR::appendGainMap(jr_compressed_ptr compressed_jpeg_image,
jr_compressed_ptr compressed_gain_map,
jr_exif_ptr exif,
+ void* icc, size_t icc_size,
ultrahdr_metadata_ptr metadata,
jr_compressed_ptr dest) {
if (compressed_jpeg_image == nullptr
@@ -1128,6 +1234,18 @@ status_t JpegR::appendGainMap(jr_compressed_ptr compressed_jpeg_image,
JPEGR_CHECK(Write(dest, (void*)xmp_primary.c_str(), xmp_primary.size(), pos));
}
+ // Write ICC
+ if (icc != nullptr && icc_size > 0) {
+ const int length = icc_size + 2;
+ const uint8_t lengthH = ((length >> 8) & 0xff);
+ const uint8_t lengthL = (length & 0xff);
+ JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos));
+ JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP2, 1, pos));
+ JPEGR_CHECK(Write(dest, &lengthH, 1, pos));
+ JPEGR_CHECK(Write(dest, &lengthL, 1, pos));
+ JPEGR_CHECK(Write(dest, icc, icc_size, pos));
+ }
+
// Prepare and write MPF
{
const int length = 2 + calculateMpfSize();
@@ -1235,4 +1353,82 @@ status_t JpegR::toneMap(jr_uncompressed_ptr src, jr_uncompressed_ptr dest) {
return NO_ERROR;
}
+status_t JpegR::convertYuv(jr_uncompressed_ptr image,
+ ultrahdr_color_gamut src_encoding,
+ ultrahdr_color_gamut dest_encoding) {
+ if (image == nullptr) {
+ return ERROR_JPEGR_INVALID_NULL_PTR;
+ }
+
+ if (src_encoding == ULTRAHDR_COLORGAMUT_UNSPECIFIED
+ || dest_encoding == ULTRAHDR_COLORGAMUT_UNSPECIFIED) {
+ return ERROR_JPEGR_INVALID_COLORGAMUT;
+ }
+
+ ColorTransformFn conversionFn = nullptr;
+ switch (src_encoding) {
+ case ULTRAHDR_COLORGAMUT_BT709:
+ switch (dest_encoding) {
+ case ULTRAHDR_COLORGAMUT_BT709:
+ return NO_ERROR;
+ case ULTRAHDR_COLORGAMUT_P3:
+ conversionFn = yuv709To601;
+ break;
+ case ULTRAHDR_COLORGAMUT_BT2100:
+ conversionFn = yuv709To2100;
+ break;
+ default:
+ // Should be impossible to hit after input validation
+ return ERROR_JPEGR_INVALID_COLORGAMUT;
+ }
+ break;
+ case ULTRAHDR_COLORGAMUT_P3:
+ switch (dest_encoding) {
+ case ULTRAHDR_COLORGAMUT_BT709:
+ conversionFn = yuv601To709;
+ break;
+ case ULTRAHDR_COLORGAMUT_P3:
+ return NO_ERROR;
+ case ULTRAHDR_COLORGAMUT_BT2100:
+ conversionFn = yuv601To2100;
+ break;
+ default:
+ // Should be impossible to hit after input validation
+ return ERROR_JPEGR_INVALID_COLORGAMUT;
+ }
+ break;
+ case ULTRAHDR_COLORGAMUT_BT2100:
+ switch (dest_encoding) {
+ case ULTRAHDR_COLORGAMUT_BT709:
+ conversionFn = yuv2100To709;
+ break;
+ case ULTRAHDR_COLORGAMUT_P3:
+ conversionFn = yuv2100To601;
+ break;
+ case ULTRAHDR_COLORGAMUT_BT2100:
+ return NO_ERROR;
+ default:
+ // Should be impossible to hit after input validation
+ return ERROR_JPEGR_INVALID_COLORGAMUT;
+ }
+ break;
+ default:
+ // Should be impossible to hit after input validation
+ return ERROR_JPEGR_INVALID_COLORGAMUT;
+ }
+
+ if (conversionFn == nullptr) {
+ // Should be impossible to hit after input validation
+ return ERROR_JPEGR_INVALID_COLORGAMUT;
+ }
+
+ for (size_t y = 0; y < image->height / 2; ++y) {
+ for (size_t x = 0; x < image->width / 2; ++x) {
+ transformYuv420(image, x, y, conversionFn);
+ }
+ }
+
+ return NO_ERROR;
+}
+
} // namespace android::ultrahdr
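For reference, the APP2 segment that appendGainMap now writes has a simple layout: the 0xFF 0xE2 marker, a big-endian length that counts itself plus the payload, and then the ICC payload, which writeIccProfile already prefixes with the 12-byte "ICC_PROFILE" identifier and the chunk count/index bytes. A hedged sketch of those bytes; the helper name and the use of std::vector are assumptions for the example:

#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<uint8_t> buildIccApp2Segment(const uint8_t* icc, size_t icc_size) {
  std::vector<uint8_t> out;
  const size_t length = icc_size + 2;  // the length field counts its own two bytes
  out.push_back(0xFF);                 // JPEG marker prefix
  out.push_back(0xE2);                 // APP2 marker
  out.push_back(static_cast<uint8_t>((length >> 8) & 0xFF));  // length, high byte
  out.push_back(static_cast<uint8_t>(length & 0xFF));         // length, low byte
  out.insert(out.end(), icc, icc + icc_size);  // identifier + chunk bytes + profile
  return out;
}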
diff --git a/libs/ultrahdr/tests/Android.bp b/libs/ultrahdr/tests/Android.bp
index 7dd9d04fbd..594413018c 100644
--- a/libs/ultrahdr/tests/Android.bp
+++ b/libs/ultrahdr/tests/Android.bp
@@ -25,8 +25,9 @@ cc_test {
name: "libultrahdr_test",
test_suites: ["device-tests"],
srcs: [
- "jpegr_test.cpp",
"gainmapmath_test.cpp",
+ "icchelper_test.cpp",
+ "jpegr_test.cpp",
],
shared_libs: [
"libimage_io",
@@ -72,5 +73,7 @@ cc_test {
static_libs: [
"libgtest",
"libjpegdecoder",
+ "libultrahdr",
+ "libutils",
],
}
diff --git a/libs/ultrahdr/tests/data/minnie-320x240-yuv-icc.jpg b/libs/ultrahdr/tests/data/minnie-320x240-yuv-icc.jpg
new file mode 100644
index 0000000000..f61e0e8525
--- /dev/null
+++ b/libs/ultrahdr/tests/data/minnie-320x240-yuv-icc.jpg
Binary files differ
diff --git a/libs/ultrahdr/tests/gainmapmath_test.cpp b/libs/ultrahdr/tests/gainmapmath_test.cpp
index c456653821..af90365e56 100644
--- a/libs/ultrahdr/tests/gainmapmath_test.cpp
+++ b/libs/ultrahdr/tests/gainmapmath_test.cpp
@@ -28,6 +28,7 @@ public:
float ComparisonEpsilon() { return 1e-4f; }
float LuminanceEpsilon() { return 1e-2f; }
+ float YuvConversionEpsilon() { return 1.0f / (255.0f * 2.0f); }
Color Yuv420(uint8_t y, uint8_t u, uint8_t v) {
return {{{ static_cast<float>(y) / 255.0f,
@@ -63,9 +64,13 @@ public:
Color YuvBlack() { return {{{ 0.0f, 0.0f, 0.0f }}}; }
Color YuvWhite() { return {{{ 1.0f, 0.0f, 0.0f }}}; }
- Color SrgbYuvRed() { return {{{ 0.299f, -0.1687f, 0.5f }}}; }
- Color SrgbYuvGreen() { return {{{ 0.587f, -0.3313f, -0.4187f }}}; }
- Color SrgbYuvBlue() { return {{{ 0.114f, 0.5f, -0.0813f }}}; }
+ Color SrgbYuvRed() { return {{{ 0.2126f, -0.11457f, 0.5f }}}; }
+ Color SrgbYuvGreen() { return {{{ 0.7152f, -0.38543f, -0.45415f }}}; }
+ Color SrgbYuvBlue() { return {{{ 0.0722f, 0.5f, -0.04585f }}}; }
+
+ Color P3YuvRed() { return {{{ 0.299f, -0.16874f, 0.5f }}}; }
+ Color P3YuvGreen() { return {{{ 0.587f, -0.33126f, -0.41869f }}}; }
+ Color P3YuvBlue() { return {{{ 0.114f, 0.5f, -0.08131f }}}; }
Color Bt2100YuvRed() { return {{{ 0.2627f, -0.13963f, 0.5f }}}; }
Color Bt2100YuvGreen() { return {{{ 0.6780f, -0.36037f, -0.45979f }}}; }
@@ -78,6 +83,13 @@ public:
return luminance_scaled * kSdrWhiteNits;
}
+ float P3YuvToLuminance(Color yuv_gamma, ColorCalculationFn luminanceFn) {
+ Color rgb_gamma = p3YuvToRgb(yuv_gamma);
+ Color rgb = srgbInvOetf(rgb_gamma);
+ float luminance_scaled = luminanceFn(rgb);
+ return luminance_scaled * kSdrWhiteNits;
+ }
+
float Bt2100YuvToLuminance(Color yuv_gamma, ColorTransformFn hdrInvOetf,
ColorTransformFn gamutConversionFn, ColorCalculationFn luminanceFn,
float scale_factor) {
@@ -402,6 +414,56 @@ TEST_F(GainMapMathTest, P3Luminance) {
EXPECT_FLOAT_EQ(p3Luminance(RgbBlue()), 0.06891f);
}
+TEST_F(GainMapMathTest, P3YuvToRgb) {
+ Color rgb_black = p3YuvToRgb(YuvBlack());
+ EXPECT_RGB_NEAR(rgb_black, RgbBlack());
+
+ Color rgb_white = p3YuvToRgb(YuvWhite());
+ EXPECT_RGB_NEAR(rgb_white, RgbWhite());
+
+ Color rgb_r = p3YuvToRgb(P3YuvRed());
+ EXPECT_RGB_NEAR(rgb_r, RgbRed());
+
+ Color rgb_g = p3YuvToRgb(P3YuvGreen());
+ EXPECT_RGB_NEAR(rgb_g, RgbGreen());
+
+ Color rgb_b = p3YuvToRgb(P3YuvBlue());
+ EXPECT_RGB_NEAR(rgb_b, RgbBlue());
+}
+
+TEST_F(GainMapMathTest, P3RgbToYuv) {
+ Color yuv_black = p3RgbToYuv(RgbBlack());
+ EXPECT_YUV_NEAR(yuv_black, YuvBlack());
+
+ Color yuv_white = p3RgbToYuv(RgbWhite());
+ EXPECT_YUV_NEAR(yuv_white, YuvWhite());
+
+ Color yuv_r = p3RgbToYuv(RgbRed());
+ EXPECT_YUV_NEAR(yuv_r, P3YuvRed());
+
+ Color yuv_g = p3RgbToYuv(RgbGreen());
+ EXPECT_YUV_NEAR(yuv_g, P3YuvGreen());
+
+ Color yuv_b = p3RgbToYuv(RgbBlue());
+ EXPECT_YUV_NEAR(yuv_b, P3YuvBlue());
+}
+
+TEST_F(GainMapMathTest, P3RgbYuvRoundtrip) {
+ Color rgb_black = p3YuvToRgb(p3RgbToYuv(RgbBlack()));
+ EXPECT_RGB_NEAR(rgb_black, RgbBlack());
+
+ Color rgb_white = p3YuvToRgb(p3RgbToYuv(RgbWhite()));
+ EXPECT_RGB_NEAR(rgb_white, RgbWhite());
+
+ Color rgb_r = p3YuvToRgb(p3RgbToYuv(RgbRed()));
+ EXPECT_RGB_NEAR(rgb_r, RgbRed());
+
+ Color rgb_g = p3YuvToRgb(p3RgbToYuv(RgbGreen()));
+ EXPECT_RGB_NEAR(rgb_g, RgbGreen());
+
+ Color rgb_b = p3YuvToRgb(p3RgbToYuv(RgbBlue()));
+ EXPECT_RGB_NEAR(rgb_b, RgbBlue());
+}
TEST_F(GainMapMathTest, Bt2100Luminance) {
EXPECT_FLOAT_EQ(bt2100Luminance(RgbBlack()), 0.0f);
EXPECT_FLOAT_EQ(bt2100Luminance(RgbWhite()), 1.0f);
@@ -461,6 +523,163 @@ TEST_F(GainMapMathTest, Bt2100RgbYuvRoundtrip) {
EXPECT_RGB_NEAR(rgb_b, RgbBlue());
}
+TEST_F(GainMapMathTest, Bt709ToBt601YuvConversion) {
+ Color yuv_black = srgbRgbToYuv(RgbBlack());
+ EXPECT_YUV_NEAR(yuv709To601(yuv_black), YuvBlack());
+
+ Color yuv_white = srgbRgbToYuv(RgbWhite());
+ EXPECT_YUV_NEAR(yuv709To601(yuv_white), YuvWhite());
+
+ Color yuv_r = srgbRgbToYuv(RgbRed());
+ EXPECT_YUV_NEAR(yuv709To601(yuv_r), P3YuvRed());
+
+ Color yuv_g = srgbRgbToYuv(RgbGreen());
+ EXPECT_YUV_NEAR(yuv709To601(yuv_g), P3YuvGreen());
+
+ Color yuv_b = srgbRgbToYuv(RgbBlue());
+ EXPECT_YUV_NEAR(yuv709To601(yuv_b), P3YuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt709ToBt2100YuvConversion) {
+ Color yuv_black = srgbRgbToYuv(RgbBlack());
+ EXPECT_YUV_NEAR(yuv709To2100(yuv_black), YuvBlack());
+
+ Color yuv_white = srgbRgbToYuv(RgbWhite());
+ EXPECT_YUV_NEAR(yuv709To2100(yuv_white), YuvWhite());
+
+ Color yuv_r = srgbRgbToYuv(RgbRed());
+ EXPECT_YUV_NEAR(yuv709To2100(yuv_r), Bt2100YuvRed());
+
+ Color yuv_g = srgbRgbToYuv(RgbGreen());
+ EXPECT_YUV_NEAR(yuv709To2100(yuv_g), Bt2100YuvGreen());
+
+ Color yuv_b = srgbRgbToYuv(RgbBlue());
+ EXPECT_YUV_NEAR(yuv709To2100(yuv_b), Bt2100YuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt601ToBt709YuvConversion) {
+ Color yuv_black = p3RgbToYuv(RgbBlack());
+ EXPECT_YUV_NEAR(yuv601To709(yuv_black), YuvBlack());
+
+ Color yuv_white = p3RgbToYuv(RgbWhite());
+ EXPECT_YUV_NEAR(yuv601To709(yuv_white), YuvWhite());
+
+ Color yuv_r = p3RgbToYuv(RgbRed());
+ EXPECT_YUV_NEAR(yuv601To709(yuv_r), SrgbYuvRed());
+
+ Color yuv_g = p3RgbToYuv(RgbGreen());
+ EXPECT_YUV_NEAR(yuv601To709(yuv_g), SrgbYuvGreen());
+
+ Color yuv_b = p3RgbToYuv(RgbBlue());
+ EXPECT_YUV_NEAR(yuv601To709(yuv_b), SrgbYuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt601ToBt2100YuvConversion) {
+ Color yuv_black = p3RgbToYuv(RgbBlack());
+ EXPECT_YUV_NEAR(yuv601To2100(yuv_black), YuvBlack());
+
+ Color yuv_white = p3RgbToYuv(RgbWhite());
+ EXPECT_YUV_NEAR(yuv601To2100(yuv_white), YuvWhite());
+
+ Color yuv_r = p3RgbToYuv(RgbRed());
+ EXPECT_YUV_NEAR(yuv601To2100(yuv_r), Bt2100YuvRed());
+
+ Color yuv_g = p3RgbToYuv(RgbGreen());
+ EXPECT_YUV_NEAR(yuv601To2100(yuv_g), Bt2100YuvGreen());
+
+ Color yuv_b = p3RgbToYuv(RgbBlue());
+ EXPECT_YUV_NEAR(yuv601To2100(yuv_b), Bt2100YuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt2100ToBt709YuvConversion) {
+ Color yuv_black = bt2100RgbToYuv(RgbBlack());
+ EXPECT_YUV_NEAR(yuv2100To709(yuv_black), YuvBlack());
+
+ Color yuv_white = bt2100RgbToYuv(RgbWhite());
+ EXPECT_YUV_NEAR(yuv2100To709(yuv_white), YuvWhite());
+
+ Color yuv_r = bt2100RgbToYuv(RgbRed());
+ EXPECT_YUV_NEAR(yuv2100To709(yuv_r), SrgbYuvRed());
+
+ Color yuv_g = bt2100RgbToYuv(RgbGreen());
+ EXPECT_YUV_NEAR(yuv2100To709(yuv_g), SrgbYuvGreen());
+
+ Color yuv_b = bt2100RgbToYuv(RgbBlue());
+ EXPECT_YUV_NEAR(yuv2100To709(yuv_b), SrgbYuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt2100ToBt601YuvConversion) {
+ Color yuv_black = bt2100RgbToYuv(RgbBlack());
+ EXPECT_YUV_NEAR(yuv2100To601(yuv_black), YuvBlack());
+
+ Color yuv_white = bt2100RgbToYuv(RgbWhite());
+ EXPECT_YUV_NEAR(yuv2100To601(yuv_white), YuvWhite());
+
+ Color yuv_r = bt2100RgbToYuv(RgbRed());
+ EXPECT_YUV_NEAR(yuv2100To601(yuv_r), P3YuvRed());
+
+ Color yuv_g = bt2100RgbToYuv(RgbGreen());
+ EXPECT_YUV_NEAR(yuv2100To601(yuv_g), P3YuvGreen());
+
+ Color yuv_b = bt2100RgbToYuv(RgbBlue());
+ EXPECT_YUV_NEAR(yuv2100To601(yuv_b), P3YuvBlue());
+}
+
+TEST_F(GainMapMathTest, TransformYuv420) {
+ ColorTransformFn transforms[] = { yuv709To601, yuv709To2100, yuv601To709, yuv601To2100,
+ yuv2100To709, yuv2100To601 };
+ for (const ColorTransformFn& transform : transforms) {
+ jpegr_uncompressed_struct input = Yuv420Image();
+
+ size_t out_buf_size = input.width * input.height * 3 / 2;
+ std::unique_ptr<uint8_t[]> out_buf = std::make_unique<uint8_t[]>(out_buf_size);
+ memcpy(out_buf.get(), input.data, out_buf_size);
+ jpegr_uncompressed_struct output = Yuv420Image();
+ output.data = out_buf.get();
+
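+ // Transform only the chroma sample at (1, 1); the assertions below expect exactly the 2x2 luma block at (2..3, 2..3) to change.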
+ transformYuv420(&output, 1, 1, transform);
+
+ for (size_t y = 0; y < 4; ++y) {
+ for (size_t x = 0; x < 4; ++x) {
+ // Skip the pixels covered by the last chroma sample, which transformYuv420 modified above
+ if (x >= 2 && y >= 2) {
+ continue;
+ }
+
+ // All other pixels should remain unchanged
+ EXPECT_YUV_EQ(getYuv420Pixel(&input, x, y), getYuv420Pixel(&output, x, y));
+ }
+ }
+
+ // The modified pixels should be updated according to the transformYuv420 algorithm
+ Color in1 = getYuv420Pixel(&input, 2, 2);
+ Color in2 = getYuv420Pixel(&input, 3, 2);
+ Color in3 = getYuv420Pixel(&input, 2, 3);
+ Color in4 = getYuv420Pixel(&input, 3, 3);
+ Color out1 = getYuv420Pixel(&output, 2, 2);
+ Color out2 = getYuv420Pixel(&output, 3, 2);
+ Color out3 = getYuv420Pixel(&output, 2, 3);
+ Color out4 = getYuv420Pixel(&output, 3, 3);
+
+ EXPECT_NEAR(transform(in1).y, out1.y, YuvConversionEpsilon());
+ EXPECT_NEAR(transform(in2).y, out2.y, YuvConversionEpsilon());
+ EXPECT_NEAR(transform(in3).y, out3.y, YuvConversionEpsilon());
+ EXPECT_NEAR(transform(in4).y, out4.y, YuvConversionEpsilon());
+
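+ // For 420 subsampling, the shared chroma value should be the average of the four per-pixel transformed results.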
+ Color expect_uv = (transform(in1) + transform(in2) + transform(in3) + transform(in4)) / 4.0f;
+
+ EXPECT_NEAR(expect_uv.u, out1.u, YuvConversionEpsilon());
+ EXPECT_NEAR(expect_uv.u, out2.u, YuvConversionEpsilon());
+ EXPECT_NEAR(expect_uv.u, out3.u, YuvConversionEpsilon());
+ EXPECT_NEAR(expect_uv.u, out4.u, YuvConversionEpsilon());
+
+ EXPECT_NEAR(expect_uv.v, out1.v, YuvConversionEpsilon());
+ EXPECT_NEAR(expect_uv.v, out2.v, YuvConversionEpsilon());
+ EXPECT_NEAR(expect_uv.v, out3.v, YuvConversionEpsilon());
+ EXPECT_NEAR(expect_uv.v, out4.v, YuvConversionEpsilon());
+ }
+}
+
TEST_F(GainMapMathTest, HlgOetf) {
EXPECT_FLOAT_EQ(hlgOetf(0.0f), 0.0f);
EXPECT_NEAR(hlgOetf(0.04167f), 0.35357f, ComparisonEpsilon());
@@ -693,7 +912,7 @@ TEST_F(GainMapMathTest, ColorConversionLookup) {
TEST_F(GainMapMathTest, EncodeGain) {
ultrahdr_metadata_struct metadata = { .maxContentBoost = 4.0f,
- .minContentBoost = 1.0f / 4.0f };
+ .minContentBoost = 1.0f / 4.0f };
EXPECT_EQ(encodeGain(0.0f, 0.0f, &metadata), 127);
EXPECT_EQ(encodeGain(0.0f, 1.0f, &metadata), 127);
@@ -751,7 +970,7 @@ TEST_F(GainMapMathTest, EncodeGain) {
TEST_F(GainMapMathTest, ApplyGain) {
ultrahdr_metadata_struct metadata = { .maxContentBoost = 4.0f,
- .minContentBoost = 1.0f / 4.0f };
+ .minContentBoost = 1.0f / 4.0f };
float displayBoost = metadata.maxContentBoost;
EXPECT_RGB_NEAR(applyGain(RgbBlack(), 0.0f, &metadata), RgbBlack());
diff --git a/libs/ultrahdr/tests/icchelper_test.cpp b/libs/ultrahdr/tests/icchelper_test.cpp
new file mode 100644
index 0000000000..ff61c08574
--- /dev/null
+++ b/libs/ultrahdr/tests/icchelper_test.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <ultrahdr/icc.h>
+#include <ultrahdr/ultrahdr.h>
+#include <utils/Log.h>
+
+namespace android::ultrahdr {
+
+class IccHelperTest : public testing::Test {
+public:
+ IccHelperTest();
+ ~IccHelperTest();
+protected:
+ virtual void SetUp();
+ virtual void TearDown();
+};
+
+IccHelperTest::IccHelperTest() {}
+
+IccHelperTest::~IccHelperTest() {}
+
+void IccHelperTest::SetUp() {}
+
+void IccHelperTest::TearDown() {}
+
+TEST_F(IccHelperTest, iccWriteThenRead) {
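+ // Round-trip each supported gamut: write an ICC profile and verify readIccColorGamut recovers it.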
+ sp<DataStruct> iccBt709 = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+ ULTRAHDR_COLORGAMUT_BT709);
+ ASSERT_NE(iccBt709->getLength(), 0);
+ ASSERT_NE(iccBt709->getData(), nullptr);
+ EXPECT_EQ(IccHelper::readIccColorGamut(iccBt709->getData(), iccBt709->getLength()),
+ ULTRAHDR_COLORGAMUT_BT709);
+
+ sp<DataStruct> iccP3 = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, ULTRAHDR_COLORGAMUT_P3);
+ ASSERT_NE(iccP3->getLength(), 0);
+ ASSERT_NE(iccP3->getData(), nullptr);
+ EXPECT_EQ(IccHelper::readIccColorGamut(iccP3->getData(), iccP3->getLength()),
+ ULTRAHDR_COLORGAMUT_P3);
+
+ sp<DataStruct> iccBt2100 = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+ ULTRAHDR_COLORGAMUT_BT2100);
+ ASSERT_NE(iccBt2100->getLength(), 0);
+ ASSERT_NE(iccBt2100->getData(), nullptr);
+ EXPECT_EQ(IccHelper::readIccColorGamut(iccBt2100->getData(), iccBt2100->getLength()),
+ ULTRAHDR_COLORGAMUT_BT2100);
+}
+
+TEST_F(IccHelperTest, iccEndianness) {
+ sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, ULTRAHDR_COLORGAMUT_BT709);
+ size_t profile_size = icc->getLength() - kICCIdentifierSize;
+
+ uint8_t* icc_bytes = reinterpret_cast<uint8_t*>(icc->getData()) + kICCIdentifierSize;
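+ // The ICC profile header stores the profile size as a big-endian uint32 in its first four bytes.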
+ uint32_t encoded_size = static_cast<uint32_t>(icc_bytes[0]) << 24 |
+ static_cast<uint32_t>(icc_bytes[1]) << 16 |
+ static_cast<uint32_t>(icc_bytes[2]) << 8 |
+ static_cast<uint32_t>(icc_bytes[3]);
+
+ EXPECT_EQ(static_cast<size_t>(encoded_size), profile_size);
+}
+
+} // namespace android::ultrahdr
+
diff --git a/libs/ultrahdr/tests/jpegdecoderhelper_test.cpp b/libs/ultrahdr/tests/jpegdecoderhelper_test.cpp
index c79dbe328b..e2da01c373 100644
--- a/libs/ultrahdr/tests/jpegdecoderhelper_test.cpp
+++ b/libs/ultrahdr/tests/jpegdecoderhelper_test.cpp
@@ -15,6 +15,7 @@
*/
#include <ultrahdr/jpegdecoderhelper.h>
+#include <ultrahdr/icc.h>
#include <gtest/gtest.h>
#include <utils/Log.h>
@@ -22,11 +23,19 @@
namespace android::ultrahdr {
+// No ICC or EXIF
#define YUV_IMAGE "/sdcard/Documents/minnie-320x240-yuv.jpg"
#define YUV_IMAGE_SIZE 20193
+// Has ICC and EXIF
+#define YUV_ICC_IMAGE "/sdcard/Documents/minnie-320x240-yuv-icc.jpg"
+#define YUV_ICC_IMAGE_SIZE 34266
+// No ICC or EXIF
#define GREY_IMAGE "/sdcard/Documents/minnie-320x240-y.jpg"
#define GREY_IMAGE_SIZE 20193
+#define IMAGE_WIDTH 320
+#define IMAGE_HEIGHT 240
+
class JpegDecoderHelperTest : public testing::Test {
public:
struct Image {
@@ -39,7 +48,7 @@ protected:
virtual void SetUp();
virtual void TearDown();
- Image mYuvImage, mGreyImage;
+ Image mYuvImage, mYuvIccImage, mGreyImage;
};
JpegDecoderHelperTest::JpegDecoderHelperTest() {}
@@ -79,6 +88,10 @@ void JpegDecoderHelperTest::SetUp() {
FAIL() << "Load file " << YUV_IMAGE << " failed";
}
mYuvImage.size = YUV_IMAGE_SIZE;
+ if (!loadFile(YUV_ICC_IMAGE, &mYuvIccImage)) {
+ FAIL() << "Load file " << YUV_ICC_IMAGE << " failed";
+ }
+ mYuvIccImage.size = YUV_ICC_IMAGE_SIZE;
if (!loadFile(GREY_IMAGE, &mGreyImage)) {
FAIL() << "Load file " << GREY_IMAGE << " failed";
}
@@ -91,6 +104,16 @@ TEST_F(JpegDecoderHelperTest, decodeYuvImage) {
JpegDecoderHelper decoder;
EXPECT_TRUE(decoder.decompressImage(mYuvImage.buffer.get(), mYuvImage.size));
ASSERT_GT(decoder.getDecompressedImageSize(), static_cast<uint32_t>(0));
+ EXPECT_EQ(IccHelper::readIccColorGamut(decoder.getICCPtr(), decoder.getICCSize()),
+ ULTRAHDR_COLORGAMUT_UNSPECIFIED);
+}
+
+TEST_F(JpegDecoderHelperTest, decodeYuvIccImage) {
+ JpegDecoderHelper decoder;
+ EXPECT_TRUE(decoder.decompressImage(mYuvIccImage.buffer.get(), mYuvIccImage.size));
+ ASSERT_GT(decoder.getDecompressedImageSize(), static_cast<uint32_t>(0));
+ EXPECT_EQ(IccHelper::readIccColorGamut(decoder.getICCPtr(), decoder.getICCSize()),
+ ULTRAHDR_COLORGAMUT_BT709);
}
TEST_F(JpegDecoderHelperTest, decodeGreyImage) {
@@ -99,4 +122,35 @@ TEST_F(JpegDecoderHelperTest, decodeGreyImage) {
ASSERT_GT(decoder.getDecompressedImageSize(), static_cast<uint32_t>(0));
}
-} // namespace android::ultrahdr \ No newline at end of file
+TEST_F(JpegDecoderHelperTest, getCompressedImageParameters) {
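+ // This test image carries no ICC profile or EXIF, so both output vectors should remain empty.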
+ size_t width = 0, height = 0;
+ std::vector<uint8_t> icc, exif;
+
+ JpegDecoderHelper decoder;
+ EXPECT_TRUE(decoder.getCompressedImageParameters(mYuvImage.buffer.get(), mYuvImage.size,
+ &width, &height, &icc, &exif));
+
+ EXPECT_EQ(width, IMAGE_WIDTH);
+ EXPECT_EQ(height, IMAGE_HEIGHT);
+ EXPECT_EQ(icc.size(), 0);
+ EXPECT_EQ(exif.size(), 0);
+}
+
+TEST_F(JpegDecoderHelperTest, getCompressedImageParametersIcc) {
+ size_t width = 0, height = 0;
+ std::vector<uint8_t> icc, exif;
+
+ JpegDecoderHelper decoder;
+ EXPECT_TRUE(decoder.getCompressedImageParameters(mYuvIccImage.buffer.get(), mYuvIccImage.size,
+ &width, &height, &icc, &exif));
+
+ EXPECT_EQ(width, IMAGE_WIDTH);
+ EXPECT_EQ(height, IMAGE_HEIGHT);
+ EXPECT_GT(icc.size(), 0);
+ EXPECT_GT(exif.size(), 0);
+
+ EXPECT_EQ(IccHelper::readIccColorGamut(icc.data(), icc.size()),
+ ULTRAHDR_COLORGAMUT_BT709);
+}
+
+} // namespace android::ultrahdr