Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a6d909c6 authored by Mark Harman
Browse files

Improve support for more than 3 image HDR (work in progress).

parent fe460025
Loading
Loading
Loading
Loading
+58 −19
Original line number Diff line number Diff line
@@ -81,6 +81,16 @@ public class HDRProcessor {
		float parameter_A;
		float parameter_B;

		/**
		 * Response function of the form out = a * in + b.
		 * @param a multiplicative coefficient, stored as parameter_A
		 * @param b additive offset, stored as parameter_B
		 */
		private ResponseFunction(float a, float b) {
			parameter_A = a;
			parameter_B = b;
		}

		/**
		 * Returns the identity response function (parameter_A = 1, parameter_B = 0),
		 * i.e. one that maps every input value to itself.
		 */
		static ResponseFunction createIdentity() {
			return new ResponseFunction(1.0f, 0.0f);
		}

		/** Computes the response function.
		 * We pass the context, so this inner class can be made static.
		 * @param x_samples List of Xi samples. Must be at least 3 samples.
@@ -302,7 +312,7 @@ public class HDRProcessor {
			bitmaps = new ArrayList<>(bitmaps);
		}
		int n_bitmaps = bitmaps.size();
		if( n_bitmaps != 1 && n_bitmaps != 3 ) {
		if( n_bitmaps != 1 && n_bitmaps != 3 /*&& n_bitmaps != 5*/ ) {
			if( MyDebug.LOG )
				Log.e(TAG, "n_bitmaps not supported: " + n_bitmaps);
			throw new HDRProcessorException(HDRProcessorException.INVALID_N_IMAGES);
@@ -500,10 +510,11 @@ public class HDRProcessor {
		}
		if( MyDebug.LOG )
			Log.d(TAG, "### time after creating allocations from bitmaps: " + (System.currentTimeMillis() - time_s));
		final int base_bitmap = (n_bitmaps - 1) / 2; // index of the bitmap with the base exposure and offsets

		// perform auto-alignment
		// if assume_sorted if false, this function will also sort the allocations and bitmaps from darkest to brightest.
		BrightnessDetails brightnessDetails = autoAlignment(offsets_x, offsets_y, allocations, width, height, bitmaps, 1, assume_sorted, sort_cb, true, false, time_s);
		BrightnessDetails brightnessDetails = autoAlignment(offsets_x, offsets_y, allocations, width, height, bitmaps, base_bitmap, assume_sorted, sort_cb, true, false, time_s);
		int median_brightness = brightnessDetails.median_brightness;
		if( MyDebug.LOG ) {
			Log.d(TAG, "### time after autoAlignment: " + (System.currentTimeMillis() - time_s));
@@ -511,12 +522,15 @@ public class HDRProcessor {
		}

		// compute response_functions
		final int base_bitmap = (n_bitmaps-1)/2; // index of the bitmap with the base exposure and offsets
		for(int i=0;i<n_bitmaps;i++) {
			ResponseFunction function = null;
			if( i != base_bitmap ) {
				function = createFunctionFromBitmaps(i, bitmaps.get(i), bitmaps.get(base_bitmap), offsets_x[i], offsets_y[i]);
			}
			else if( n_bitmaps > 3 ) {
				// for more than 3 bitmaps, need to still create the identity response function
				function = ResponseFunction.createIdentity();
			}
			response_functions[i] = function;
		}
		if( MyDebug.LOG )
@@ -577,6 +591,23 @@ public class HDRProcessor {
		processHDRScript.set_parameter_A2(response_functions[2].parameter_A);
		processHDRScript.set_parameter_B2(response_functions[2].parameter_B);

		if( n_bitmaps > 3 ) {
			processHDRScript.set_offset_x1(offsets_x[1]);
			processHDRScript.set_offset_y1(offsets_y[1]);
			processHDRScript.set_parameter_A1(response_functions[1].parameter_A);
			processHDRScript.set_parameter_B1(response_functions[1].parameter_B);

			processHDRScript.set_offset_x3(offsets_x[3]);
			processHDRScript.set_offset_y3(offsets_y[3]);
			processHDRScript.set_parameter_A3(response_functions[3].parameter_A);
			processHDRScript.set_parameter_B3(response_functions[3].parameter_B);

			processHDRScript.set_offset_x4(offsets_x[4]);
			processHDRScript.set_offset_y4(offsets_y[4]);
			processHDRScript.set_parameter_A4(response_functions[4].parameter_A);
			processHDRScript.set_parameter_B4(response_functions[4].parameter_B);
		}

		// set globals

		// set tonemapping algorithm
@@ -608,7 +639,8 @@ public class HDRProcessor {
				break;
		}

		float max_possible_value = response_functions[0].parameter_A * 255 + response_functions[0].parameter_B;
		//float max_possible_value = response_functions[0].parameter_A * 255 + response_functions[0].parameter_B;
		float max_possible_value = response_functions[base_bitmap - 1].parameter_A * 255 + response_functions[base_bitmap - 1].parameter_B;
		if( MyDebug.LOG )
			Log.d(TAG, "max_possible_value: " + max_possible_value);
		if( max_possible_value < 255.0f ) {
@@ -740,7 +772,14 @@ public class HDRProcessor {
		}
		if( MyDebug.LOG )
			Log.d(TAG, "### time before processHDRScript: " + (System.currentTimeMillis() - time_s));
		processHDRScript.forEach_hdr(allocations[1], output_allocation);
		if( n_bitmaps == 3 )
			processHDRScript.forEach_hdr(allocations[base_bitmap], output_allocation);
		else {
			processHDRScript.set_n_bitmaps_g(n_bitmaps);
			processHDRScript.forEach_hdr_n(allocations[base_bitmap], output_allocation);
		}
		/*processHDRScript.set_n_bitmaps_g(n_bitmaps);
		processHDRScript.forEach_hdr_n(allocations[base_bitmap], output_allocation);*/
		if( MyDebug.LOG )
			Log.d(TAG, "### time after processHDRScript: " + (System.currentTimeMillis() - time_s));

+255 −6
Original line number Diff line number Diff line
@@ -3,10 +3,20 @@
#pragma rs_fp_relaxed

// Input exposure bitmaps, expected sorted from darkest to brightest.
// Up to 7 bitmaps are supported (bitmap0..bitmap6); the basic 3-image path
// only uses bitmap0..bitmap2.
rs_allocation bitmap0;
rs_allocation bitmap1;
rs_allocation bitmap2;
rs_allocation bitmap3;
rs_allocation bitmap4;
rs_allocation bitmap5;
rs_allocation bitmap6;

// Per-bitmap alignment offsets in pixels, added to (x, y) when sampling the
// corresponding bitmap (see the rsGetElementAt_uchar4 calls in the kernels).
int offset_x0 = 0, offset_y0 = 0;
int offset_x1 = 0, offset_y1 = 0;
int offset_x2 = 0, offset_y2 = 0;
int offset_x3 = 0, offset_y3 = 0;
int offset_x4 = 0, offset_y4 = 0;
int offset_x5 = 0, offset_y5 = 0;
int offset_x6 = 0, offset_y6 = 0;

// Response-function coefficients for bitmap0: pixel values are linearised as
// out = parameter_A * in + parameter_B (identity by default).
float parameter_A0 = 1.0f;
float parameter_B0 = 0.0f;
@@ -14,6 +24,14 @@ float parameter_A1 = 1.0f;
float parameter_B1 = 0.0f;
float parameter_A2 = 1.0f;
float parameter_B2 = 0.0f;
// Response-function coefficients for bitmaps 3..6 (used only when more than
// 3 input bitmaps are supplied); same out = A * in + B convention as above.
float parameter_A3 = 1.0f;
float parameter_B3 = 0.0f;
float parameter_A4 = 1.0f;
float parameter_B4 = 0.0f;
float parameter_A5 = 1.0f;
float parameter_B5 = 0.0f;
float parameter_A6 = 1.0f;
float parameter_B6 = 0.0f;

// Precomputed scale for the per-pixel weighting function; presumably chosen so
// that weights taper off towards the 0/255 brightness extremes — the consuming
// weight computation is outside this hunk, so confirm against the full kernel.
const float weight_scale_c = (float)((1.0-1.0/127.5)/127.5);

@@ -84,9 +102,13 @@ static uchar4 tonemap(float3 hdr) {
            float scale = 255.0f / ( tonemap_scale + value );
            scale *= linear_scale;
            // shouldn't need to clamp - linear_scale should be such that values don't map to more than 255
            out.r = (uchar)(scale * hdr.r + 0.5f);
            /*out.r = (uchar)(scale * hdr.r + 0.5f);
            out.g = (uchar)(scale * hdr.g + 0.5f);
            out.b = (uchar)(scale * hdr.b + 0.5f);
            out.b = (uchar)(scale * hdr.b + 0.5f);*/
        	float3 out_f = scale * hdr;
            out.r = (uchar)clamp(out_f.r+0.5f, 0.0f, 255.0f);
            out.g = (uchar)clamp(out_f.g+0.5f, 0.0f, 255.0f);
            out.b = (uchar)clamp(out_f.b+0.5f, 0.0f, 255.0f);
            out.a = 255;
            /*int test_r = (int)(scale * hdr.r + 0.5f);
            int test_g = (int)(scale * hdr.g + 0.5f);
@@ -251,6 +273,10 @@ uchar4 __attribute__((kernel)) hdr(uchar4 in, uint32_t x, uint32_t y) {

			// now look at a neighbour image
			weight = 1.0f - weight;

            // note, whilst it seems tempting to refactor the following cases in the if/else statement
            // into common code (assigning mid_indx+1 or mid_indx-1 to a variable), this results in
            // slower performance!
			if( avg <= 127.5f ) {
                rgb = convert_float3(pixels[mid_indx+1].rgb);
    			/* In some cases it can be that even on the neighbour image, the brightness is too
@@ -312,10 +338,233 @@ uchar4 __attribute__((kernel)) hdr(uchar4 in, uint32_t x, uint32_t y) {
			hdr += weight * rgb;
			sum_weight += weight;
			
			// testing: make all non-safe images black:
			//hdr_r = 0;
			//hdr_g = 0;
			//hdr_b = 0;
			// testing: make all non-safe images purple:
			//hdr.r = 255;
			//hdr.g = 0;
			//hdr.b = 255;
		}
	}

	hdr /= sum_weight;

    uchar4 out = tonemap(hdr);
	return out;
}

int n_bitmaps_g = 3;

uchar4 __attribute__((kernel)) hdr_n(uchar4 in, uint32_t x, uint32_t y) {
    int32_t ix = x;
    int32_t iy = y;
    const int max_bitmaps_c = 7;
	int mid_indx = (n_bitmaps_g-1)/2;
	uchar4 pixels[max_bitmaps_c];

	float parameter_A[max_bitmaps_c];
	float parameter_B[max_bitmaps_c];

    parameter_A[0] = parameter_A0;
    parameter_B[0] = parameter_B0;
    parameter_A[1] = parameter_A1;
    parameter_B[1] = parameter_B1;
    parameter_A[2] = parameter_A2;
    parameter_B[2] = parameter_B2;
    if( n_bitmaps_g > 3 ) {
        parameter_A[3] = parameter_A3;
        parameter_B[3] = parameter_B3;
        parameter_A[4] = parameter_A4;
        parameter_B[4] = parameter_B4;
        if( n_bitmaps_g > 5 ) {
            parameter_A[5] = parameter_A5;
            parameter_B[5] = parameter_B5;
            parameter_A[6] = parameter_A6;
            parameter_B[6] = parameter_B6;
        }
    }

	if( ix+offset_x0 >= 0 && iy+offset_y0 >= 0 && ix+offset_x0 < rsAllocationGetDimX(bitmap0) && iy+offset_y0 < rsAllocationGetDimY(bitmap0) ) {
    	pixels[0] = rsGetElementAt_uchar4(bitmap0, x+offset_x0, y+offset_y0);
	}
	else {
    	pixels[0] = in;
        parameter_A[0] = parameter_A[mid_indx];
        parameter_B[0] = parameter_B[mid_indx];
	}

	if( ix+offset_x1 >= 0 && iy+offset_y1 >= 0 && ix+offset_x1 < rsAllocationGetDimX(bitmap1) && iy+offset_y1 < rsAllocationGetDimY(bitmap1) ) {
    	pixels[1] = rsGetElementAt_uchar4(bitmap1, x+offset_x1, y+offset_y1);
	}
	else {
    	pixels[1] = in;
        parameter_A[1] = parameter_A[mid_indx];
        parameter_B[1] = parameter_B[mid_indx];
	}

 	if( ix+offset_x2 >= 0 && iy+offset_y2 >= 0 && ix+offset_x2 < rsAllocationGetDimX(bitmap2) && iy+offset_y2 < rsAllocationGetDimY(bitmap2) ) {
    	pixels[2] = rsGetElementAt_uchar4(bitmap2, x+offset_x2, y+offset_y2);
	}
	else {
    	pixels[2] = in;
        parameter_A[2] = parameter_A[mid_indx];
        parameter_B[2] = parameter_B[mid_indx];
	}

	if( n_bitmaps_g > 3 ) {
        if( ix+offset_x3 >= 0 && iy+offset_y3 >= 0 && ix+offset_x3 < rsAllocationGetDimX(bitmap3) && iy+offset_y3 < rsAllocationGetDimY(bitmap3) ) {
            pixels[3] = rsGetElementAt_uchar4(bitmap3, x+offset_x3, y+offset_y3);
        }
        else {
            pixels[3] = in;
            parameter_A[3] = parameter_A[mid_indx];
            parameter_B[3] = parameter_B[mid_indx];
        }

        if( ix+offset_x4 >= 0 && iy+offset_y4 >= 0 && ix+offset_x4 < rsAllocationGetDimX(bitmap4) && iy+offset_y4 < rsAllocationGetDimY(bitmap4) ) {
            pixels[4] = rsGetElementAt_uchar4(bitmap4, x+offset_x4, y+offset_y4);
        }
        else {
            pixels[4] = in;
            parameter_A[4] = parameter_A[mid_indx];
            parameter_B[4] = parameter_B[mid_indx];
        }

        if( n_bitmaps_g > 5 ) {
            if( ix+offset_x5 >= 0 && iy+offset_y5 >= 0 && ix+offset_x5 < rsAllocationGetDimX(bitmap5) && iy+offset_y5 < rsAllocationGetDimY(bitmap5) ) {
                pixels[5] = rsGetElementAt_uchar4(bitmap5, x+offset_x5, y+offset_y5);
            }
            else {
                pixels[5] = in;
                parameter_A[5] = parameter_A[mid_indx];
                parameter_B[5] = parameter_B[mid_indx];
            }

            if( ix+offset_x6 >= 0 && iy+offset_y6 >= 0 && ix+offset_x6 < rsAllocationGetDimX(bitmap6) && iy+offset_y6 < rsAllocationGetDimY(bitmap6) ) {
                pixels[6] = rsGetElementAt_uchar4(bitmap6, x+offset_x6, y+offset_y6);
            }
            else {
                pixels[6] = in;
                parameter_A[6] = parameter_A[mid_indx];
                parameter_B[6] = parameter_B[mid_indx];
            }
        }
	}

	/*pixels[0] = in;
	pixels[1] = in;
	pixels[2] = in;
	pixels[3] = in;
	pixels[4] = in;*/

	float3 hdr = (float3){0.0f, 0.0f, 0.0f};
	float sum_weight = 0.0f;

	// assumes an odd number of bitmaps (up to max_bitmaps_c = 7), with the middle bitmap being the "base" exposure, and first images being darker, last images being brighter
	{
		//const float safe_range_c = 64.0f;
		const float safe_range_c = 96.0f;
        float3 rgb = convert_float3(pixels[mid_indx].rgb);
		float avg = (rgb.r+rgb.g+rgb.b) / 3.0f;
		float diff = fabs( avg - 127.5f );
		float weight = 1.0f;
		if( diff > safe_range_c ) {
			// scaling chosen so that 0 and 255 map to a non-zero weight of 0.01
			weight = 1.0f - 0.99f * (diff - safe_range_c) / (127.5f - safe_range_c);
		}

		// response function
		rgb = parameter_A[mid_indx] * rgb + parameter_B[mid_indx];

		hdr += weight * rgb;
		sum_weight += weight;

		if( weight < 1.0 ) {
    		float3 base_rgb = rgb;
			int adj_indx = mid_indx;

			// now look at a neighbour image
			weight = 1.0f - weight;
			if( avg <= 127.5f ) {
    			adj_indx++;
			}
			else {
    			adj_indx--;
			}
            rgb = convert_float3(pixels[adj_indx].rgb);
            /*if( n_bitmaps_g > 3 ) {
                avg = (rgb.r+rgb.g+rgb.b) / 3.0f;
                diff = fabs( avg - 127.5f );
                if( diff > safe_range_c ) {
                    // scaling chosen so that 0 and 255 map to a non-zero weight of 0.01
                    weight *= 1.0f - 0.99f * (diff - safe_range_c) / (127.5f - safe_range_c);
                }
            }*/
            rgb = parameter_A[adj_indx] * rgb + parameter_B[adj_indx];

            float value = fmax(rgb.r, rgb.g);
            value = fmax(value, rgb.b);
			if( value <= 250.0f )
			{
                // deghosting
                // for overexposed pixels, we don't have a reliable value for that pixel, so we can't distinguish between
                // pixels that are overexposed, and those that need deghosting, so we limit to value <= 250.0f
                // tests that benefit from deghosting for dark pixels: testHDR2, testHDR9, testHDR19, testHDR21, testHDR30,
                // testHDR35, testHDR37, testHDR40, testHDR41, testHDR42, testHDR44
                // tests that benefit from deghosting for bright pixels: testHDR2, testHDR41, testHDR42
                // for 127.5-avg = 96.0, we want wiener_C = wiener_C_lo
                // for 127.5-avg = 127.5f, we want wiener_C = wiener_C_hi
                const float wiener_C_lo = 2000.0f;
                const float wiener_C_hi = 8000.0f;
                float wiener_C = wiener_C_lo; // higher value means more HDR but less ghosting
                float x = fabs( value - 127.5f ) - 96.0f;
                if( x > 0.0f ) {
                    const float scale = (wiener_C_hi-wiener_C_lo)/(127.5f-96.0f);
                    wiener_C = wiener_C_lo + x*scale;
                }
                float3 diff = base_rgb - rgb;
                float L = dot(diff, diff);
                float ghost_weight = L/(L+wiener_C);
                rgb = ghost_weight * base_rgb + (1.0-ghost_weight) * rgb;
            }

			hdr += weight * rgb;
			sum_weight += weight;

			/*if( n_bitmaps_g > 3 && diff > safe_range_c ) {
                // now look at a neighbour image
                weight = 1.0f - weight;

                if( avg <= 127.5f ) {
                    adj_indx++;
                }
                else {
                    adj_indx--;
                }
                rgb = convert_float3(pixels[adj_indx].rgb);
                if( n_bitmaps_g > 5 ) {
                    avg = (rgb.r+rgb.g+rgb.b) / 3.0f;
                    diff = fabs( avg - 127.5f );
                    if( diff > safe_range_c ) {
                        // scaling chosen so that 0 and 255 map to a non-zero weight of 0.01
                        weight *= 1.0f - 0.99f * (diff - safe_range_c) / (127.5f - safe_range_c);
                    }
                }
                rgb = parameter_A[adj_indx] * rgb + parameter_B[adj_indx];

                // todo: deghosting

                hdr += weight * rgb;
                sum_weight += weight;

                // testing: make all non-safe images purple:
                hdr.r = 255;
                hdr.g = 0;
                hdr.b = 255;
			}*/

			// testing: make all non-safe images purple:
			//hdr.r = 255;
			//hdr.g = 0;
			//hdr.b = 255;
		}
	}