better exposure control and white balance
@@ -9,6 +9,7 @@
#     "colour-science",
#     "torch",
#     "warp-lang",
#     "scikit-image",
# ]
# [tool.uv.sources]
# torch = [{ index = "pytorch-cu128" }]
@@ -43,16 +44,8 @@ from scipy.signal.windows import gaussian  # For creating Gaussian kernel

# --- Configuration ---
EPSILON = 1e-10
### PERFORMANCE ###
# Size of the 3D LUTs. 33 is a common industry size (e.g., in .cube files).
# 17 is faster to generate, 65 is more accurate but much slower to generate.
LUT_SIZE = 65

GLOBAL_DEBUG = False


# --- Global variables to ensure all debug images for a single run go to the same folder ---
# This creates a unique, timestamped directory for each script execution.
GLOBAL_DEBUG = True
RUN_TIMESTAMP = datetime.now().strftime("%Y-%m-%d_%H%M%S")
DEBUG_OUTPUT_DIR = Path(f"debug_outputs/{RUN_TIMESTAMP}")

@@ -121,7 +114,6 @@ def save_debug_image(image: np.ndarray, tag: str, output_format: str = "jpeg"):
            f"⚠️ DEBUG WARNING: Could not save debug image for tag '{tag}'.\n Reason: {e}"
        )


class Info:
    name: str
    description: str
@@ -138,7 +130,6 @@ class Info:
            version,
        )


class Balance:
    r_shift: float
    g_shift: float
@@ -147,7 +138,6 @@ class Balance:
    def __init__(self, r_shift: float, g_shift: float, b_shift: float) -> None:
        self.r_shift, self.g_shift, self.b_shift = r_shift, g_shift, b_shift


class Gamma:
    r_factor: float
    g_factor: float
@@ -156,7 +146,6 @@ class Gamma:
    def __init__(self, r_factor: float, g_factor: float, b_factor: float) -> None:
        self.r_factor, self.g_factor, self.b_factor = r_factor, g_factor, b_factor


class Processing:
    gamma: Gamma
    balance: Balance
@@ -290,7 +279,7 @@ class FilmDatasheet:
        self.info, self.processing, self.properties = info, processing, properties


### NEW: GPU-accelerated separable Gaussian blur helper

def _gpu_separable_gaussian_blur(
    image_tensor: torch.Tensor, sigma: float, device: str = "cpu"
) -> torch.Tensor:
@@ -336,7 +325,6 @@ def _gpu_separable_gaussian_blur(

    return blurred_tensor


# --- Datasheet Parsing (unchanged) ---
def parse_datasheet_json(json_filepath) -> FilmDatasheet | None:
    # This function remains identical to your last version
@@ -420,11 +408,8 @@ def parse_datasheet_json(json_filepath) -> FilmDatasheet | None:
        print(f"Error parsing datasheet JSON '{json_filepath}': {e}", file=sys.stderr)
        return None


# --- Core Simulation Functions ---


### PERFORMANCE ###
def apply_3d_lut(image: np.ndarray, lut: np.ndarray, device="cpu") -> np.ndarray:
    """
    Applies a 3D LUT to an image using PyTorch's grid_sample.
@@ -472,8 +457,6 @@ def apply_3d_lut(image: np.ndarray, lut: np.ndarray, device="cpu") -> np.ndarray:

    return result_numpy


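The hunk above elides the body of apply_3d_lut; the docstring says it is a grid_sample lookup. A minimal sketch of that technique follows (an illustration under assumed shapes, not the code from this commit):

# Illustrative sketch, not from the commit: trilinear 3D-LUT lookup via grid_sample.
import numpy as np
import torch
import torch.nn.functional as F

def lut_lookup_sketch(image: np.ndarray, lut: np.ndarray) -> np.ndarray:
    # lut: (S, S, S, 3) indexed as lut[r, g, b]; image: (H, W, 3) in [0, 1].
    h, w, _ = image.shape
    # grid_sample wants the volume as (N, C, D, H, W), channels first.
    lut_t = torch.from_numpy(lut).permute(3, 0, 1, 2)[None].float()
    # Sample coordinates are (x, y, z) in [-1, 1], with x indexing the last
    # volume axis (b), so the RGB order is reversed.
    pix = torch.from_numpy(image.reshape(1, 1, 1, -1, 3)).float()
    grid = pix[..., [2, 1, 0]] * 2.0 - 1.0
    out = F.grid_sample(lut_t, grid, mode="bilinear", align_corners=True)
    return out.squeeze().T.reshape(h, w, 3).numpy()
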
### PERFORMANCE ###
def create_exposure_lut(
    spectral_data: list[SpectralSensitivityCurvePoint],
    ref_illuminant_spd,
@@ -500,8 +483,6 @@ def create_exposure_lut(
    # Reshape back into a 3D LUT
    return lut_data.reshape(size, size, size, 3)


### PERFORMANCE ###
def create_density_lut(
    processing: Processing,
    hd_curve: List[HDCurvePoint],
@@ -524,10 +505,6 @@ def create_density_lut(
    )
    return lut_data


# --- (The rest of the core functions: compute_inhibitor_matrix, compute_uncoupled_hd_curves,
# apply_dir_coupler_simulation, um_to_pixels, apply_hd_curves, apply_saturation_rgb,
# apply_spatial_effects, calculate_film_log_exposure, remain UNCHANGED from the previous version) ---
def compute_inhibitor_matrix(
    amount_rgb: List[float], diffusion_interlayer: float
) -> np.ndarray:
@@ -544,7 +521,6 @@ def compute_inhibitor_matrix(
    matrix = matrix / row_sums[:, np.newaxis]
    return matrix * np.array(amount_rgb)[:, np.newaxis]


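compute_inhibitor_matrix's visible tail row-normalizes a 3x3 coupling matrix and then scales each row by the per-channel inhibitor amount. The construction of the matrix itself is elided by the hunk; a hypothetical version consistent with the two visible steps (the commit's actual kernel may differ):

# Hypothetical sketch, not from the commit: the off-diagonal kernel is assumed.
import numpy as np

def inhibitor_matrix_sketch(amount_rgb, diffusion_interlayer):
    # Spread a fraction of each layer's inhibition into adjacent layers.
    neighbour = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=float)
    matrix = np.eye(3) + diffusion_interlayer * neighbour
    matrix = matrix / matrix.sum(axis=1, keepdims=True)  # row-normalize, as above
    return matrix * np.array(amount_rgb)[:, np.newaxis]  # per-channel strength
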
def compute_uncoupled_hd_curves(
    hd_curve_data: List[HDCurvePoint], inhibitor_matrix: np.ndarray
) -> List[HDCurvePoint]:
@@ -570,7 +546,6 @@ def compute_uncoupled_hd_curves(
        for i, log_e in enumerate(log_E_values)
    ]


def apply_dir_coupler_simulation(
    log_exposure_rgb,
    naive_density_rgb,
@@ -592,14 +567,12 @@ def apply_dir_coupler_simulation(
    )
    return log_exposure_rgb - inhibitor_effect


def um_to_pixels(sigma_um, image_width_px, film_format_mm):
    if film_format_mm <= 0 or image_width_px <= 0:
        return 0
    microns_per_pixel = (film_format_mm * 1000.0) / image_width_px
    return sigma_um / microns_per_pixel


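um_to_pixels converts a physical blur radius into pixel units via the scan pitch. A quick numeric check (illustrative, not part of the commit): a 36 mm wide frame digitized at 3600 px gives (36 * 1000) / 3600 = 10 µm per pixel, so a 25 µm sigma becomes 2.5 px.

# Illustrative check of the conversion above, not from the commit.
assert um_to_pixels(25.0, image_width_px=3600, film_format_mm=36.0) == 2.5
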
def apply_hd_curves(
    log_exposure_rgb,
    processing: Processing,
@@ -645,7 +618,6 @@ def apply_hd_curves(
    )
    return density_rgb


def apply_saturation_rgb(image_linear, saturation_factor):
    if saturation_factor == 1.0:
        return image_linear
@@ -659,66 +631,6 @@ def apply_saturation_rgb(image_linear, saturation_factor):
        1.0,
    )


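The body of apply_saturation_rgb is elided apart from its final clip. A common luminance-anchored implementation consistent with that shape (an assumption, not the commit's code) interpolates each pixel between its BT.709 luminance and itself:

# Hedged sketch, not from the commit: the exact pivot and weights are assumed.
import numpy as np

def saturation_sketch(image_linear, saturation_factor):
    lum = image_linear @ np.array([0.2126, 0.7152, 0.0722])  # BT.709 luminance
    return np.clip(
        lum[..., None] + saturation_factor * (image_linear - lum[..., None]),
        0.0,
        1.0,
    )
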
def white_balance_to_d65(image_linear_srgb: np.ndarray) -> np.ndarray:
    """
    Attempts to white balance a linear sRGB image to a D65 illuminant.

    This function is for non-RAW images where the white balance is already baked in.
    It uses the "Gray World" assumption to estimate the current illuminant.

    Args:
        image_linear_srgb: A numpy array representing the image in linear sRGB space.

    Returns:
        A numpy array of the white-balanced image, also in linear sRGB space.
    """
    print("Attempting to force D65 white balance using Gray World estimation...")

    # 1. Estimate the illuminant of the source image.
    #    The Gray World assumption states that the average pixel color is the illuminant.
    source_illuminant_rgb = np.mean(image_linear_srgb, axis=(0, 1))

    # Avoid division by zero if the image is black
    if np.all(source_illuminant_rgb < EPSILON):
        return image_linear_srgb

    # 2. Define the input and output color spaces and illuminants.
    #    We assume the image is sRGB, which also uses a D65 white point *by definition*.
    #    However, if the image has a color cast, its *actual* illuminant is not D65.
    #    We are adapting from the *estimated* illuminant back to the *ideal* D65.
    srgb_cs = colour.models.RGB_COLOURSPACE_sRGB

    # The target illuminant is D65. We get its XYZ value from colour-science.
    target_illuminant_xyz = colour.CCS_ILLUMINANTS[
        "CIE 1931 2 Degree Standard Observer"
    ]["D65"]

    # Convert our estimated RGB illuminant to XYZ.
    # source_illuminant_rgb values are assumed to be in the sRGB colourspace.
    # srgb_cs is the sRGB colourspace definition, which includes its D65 whitepoint and conversion matrix.
    source_illuminant_xyz = colour.RGB_to_XYZ(
        source_illuminant_rgb,  # RGB values to convert
        srgb_cs,  # Definition of the RGB colourspace these values are in (sRGB)
        srgb_cs.whitepoint,  # CIE XYZ of the illuminant for the sRGB colourspace (D65)
        # This ensures conversion without further chromatic adaptation at this step,
        # as the input RGB values are already adapted to srgb_cs.whitepoint.
    )

    # 3. Perform the chromatic adaptation.
    #    We use the Von Kries transform, which is a common and effective method.
    image_adapted_linear_srgb = colour.adaptation.chromatic_adaptation(
        image_linear_srgb,
        source_illuminant_xyz,
        target_illuminant_xyz,
        method="Von Kries",
        transform="CAT02",  # CAT02 is a well-regarded choice
    )

    # 4. Clip to ensure values remain valid.
    return np.clip(image_adapted_linear_srgb, 0.0, 1.0)


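The chromatic_adaptation call above is used as a black box. Under Von Kries with a CAT02 transform it reduces to per-channel gains in a sharpened cone space; a compact sketch of that math (illustrative only; colour-science's implementation adds normalization and edge-case handling):

# Illustrative sketch, not from the commit: the Von Kries / CAT02 core.
import numpy as np

M_CAT02 = np.array([[ 0.7328, 0.4296, -0.1624],
                    [-0.7036, 1.6975,  0.0061],
                    [ 0.0030, 0.0136,  0.9834]])

def von_kries_matrix(xyz_src, xyz_dst):
    # Gains that map the source white to the target white in LMS,
    # wrapped back into an XYZ-to-XYZ matrix.
    gains = (M_CAT02 @ xyz_dst) / (M_CAT02 @ xyz_src)
    return np.linalg.inv(M_CAT02) @ np.diag(gains) @ M_CAT02
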
def apply_spatial_effects(
    image,
    film_format_mm,
@@ -760,7 +672,6 @@ def apply_spatial_effects(
        else np.clip(image, 0.0, 1.0)
    )


def calculate_film_log_exposure(
    image_linear_srgb,
    spectral_data: list[SpectralSensitivityCurvePoint],
@@ -814,7 +725,6 @@ def calculate_film_log_exposure(
    log_shift = middle_gray_logE - np.log10(exposure_18_gray_film[1] + EPSILON)
    return np.log10(film_exposure_values + EPSILON) + log_shift


def apply_spatial_effects_new(
    image: np.ndarray,
    film_format_mm: int,
@@ -920,75 +830,207 @@ def apply_spatial_effects_new(

    return result_numpy

def chromatic_adaptation_white_balance(
    image_linear_srgb: np.ndarray,
    target_illuminant_name: str = "D65",
    source_estimation_p_norm: float = 6.0,
    source_estimation_clip_percentile: float = 2.0,
    adaptation_method: str = "Von Kries",
) -> np.ndarray:
    """
    Performs white balance using chromatic adaptation to a target illuminant,
    while preserving the original image's perceived luminance.
    """
    print(f"Applying Chromatic Adaptation White Balance to {target_illuminant_name} (luminance preserving)...")

    if np.all(image_linear_srgb < 1e-6):
        print("  - Image is black, skipping WB.")
        return image_linear_srgb

    # 1. Estimate Source Illuminant (adapted from your apply_shades_of_grey_wb)
    img_for_estimation = image_linear_srgb.copy()
    epsilon = 1e-10

    if source_estimation_clip_percentile > 0:
        lower = np.percentile(img_for_estimation, source_estimation_clip_percentile, axis=(0, 1))
        upper = np.percentile(img_for_estimation, 100 - source_estimation_clip_percentile, axis=(0, 1))
        # Ensure lower is not greater than upper to avoid issues with uniform images
        lower = np.minimum(lower, upper - epsilon)
        img_for_estimation = np.clip(img_for_estimation, lower, upper)

    r_est = img_for_estimation[..., 0]
    g_est = img_for_estimation[..., 1]
    b_est = img_for_estimation[..., 2]

    if source_estimation_p_norm == float("inf"):
        illum_r_rgb = np.max(r_est) + epsilon
        illum_g_rgb = np.max(g_est) + epsilon
        illum_b_rgb = np.max(b_est) + epsilon
    else:
        illum_r_rgb = np.power(np.mean(np.power(r_est, source_estimation_p_norm)), 1 / source_estimation_p_norm) + epsilon
        illum_g_rgb = np.power(np.mean(np.power(g_est, source_estimation_p_norm)), 1 / source_estimation_p_norm) + epsilon
        illum_b_rgb = np.power(np.mean(np.power(b_est, source_estimation_p_norm)), 1 / source_estimation_p_norm) + epsilon

    source_illuminant_RGB = np.array([illum_r_rgb, illum_g_rgb, illum_b_rgb])
    # print(f"  - Estimated Source Illuminant (RGB): {np.round(source_illuminant_RGB, 4)}")  # Optional debug

    srgb_colourspace = colour.models.RGB_COLOURSPACE_sRGB
    source_illuminant_XYZ = colour.RGB_to_XYZ(source_illuminant_RGB,
                                              srgb_colourspace,
                                              srgb_colourspace.whitepoint,
                                              adaptation_method)
    print(f"  - Estimated Source Illuminant (XYZ): {np.round(source_illuminant_XYZ, 4)}")

    # 2. Define Target Illuminant and Normalize its Luminance (Y usually to 1.0 for reference)
    try:
        target_illuminant_XYZ_ref = colour.xy_to_XYZ(srgb_colourspace.whitepoint)  # Default to sRGB's D65
        if target_illuminant_name:
            target_illuminant_spd = SDS_ILLUMINANTS.get(target_illuminant_name)
            if target_illuminant_spd:
                # sd_to_XYZ often returns Y=100, so normalize to Y=1
                xyz_from_sd = colour.sd_to_XYZ(target_illuminant_spd, illuminant=srgb_colourspace.whitepoint)
                if xyz_from_sd[1] != 0:
                    target_illuminant_XYZ_ref = xyz_from_sd / xyz_from_sd[1]  # Normalize Y to 1.0
                else:  # Should not happen for standard illuminants
                    target_illuminant_XYZ_ref = xyz_from_sd
            else:
                print(f"  - Warning: Target illuminant '{target_illuminant_name}' not found. Using sRGB D65.")
        # Ensure Y is around 1.0 after potential direct XYZ fetching or other sources
        if target_illuminant_XYZ_ref[1] != 0 and not np.isclose(target_illuminant_XYZ_ref[1], 1.0):
            target_illuminant_XYZ_ref = target_illuminant_XYZ_ref / target_illuminant_XYZ_ref[1]

    except Exception as e:
        print(f"  - Error defining target illuminant '{target_illuminant_name}', defaulting to sRGB D65 (Y=1). Error: {e}")
        target_illuminant_XYZ_ref = colour.xy_to_XYZ(srgb_colourspace.whitepoint)
        if target_illuminant_XYZ_ref[1] != 0:  # Ensure D65 default is Y=1
            target_illuminant_XYZ_ref = target_illuminant_XYZ_ref / target_illuminant_XYZ_ref[1]

    print(f"  - Reference Target Illuminant {target_illuminant_name} (XYZ, Y normalized to 1): {np.round(target_illuminant_XYZ_ref, 4)}")

    # 3. Check for effective no-op.
    #    Compare chromaticities of source and reference target, using a small
    #    epsilon to avoid division by zero in XYZ_to_xy if XYZ is black.
    xy_source = colour.XYZ_to_xy(source_illuminant_XYZ + epsilon)
    xy_target_ref = colour.XYZ_to_xy(target_illuminant_XYZ_ref + epsilon)

    if np.allclose(xy_source, xy_target_ref, atol=1e-3):
        print("  - Source and target illuminants have very similar chromaticity. Skipping CAT to preserve luminance.")
        return np.clip(image_linear_srgb, 0.0, 1.0)

    # 4. Create a version of the target illuminant that has the *same luminance* as the source illuminant.
    #    This ensures the CAT primarily adjusts chromaticity.
    if source_illuminant_XYZ[1] > epsilon:  # Check Y component of source is not effectively zero
        # Scale the (Y=1 normalized) target illuminant's chromaticity to match the source's luminance
        XYZ_target_for_CAT = target_illuminant_XYZ_ref * source_illuminant_XYZ[1]
    else:
        # Source illuminant is effectively black or has zero luminance.
        # Meaningful adaptation isn't possible without massive, undefined gain.
        print("  - Source illuminant luminance is effectively zero. Skipping adaptation.")
        return np.clip(image_linear_srgb, 0.0, 1.0)

    print(f"  - Target for CAT (D65 chromaticity, source luminance) (XYZ): {np.round(XYZ_target_for_CAT, 4)}")

    # 5. Perform Chromatic Adaptation
    adapted_image_linear_srgb = colour.adaptation.chromatic_adaptation(
        image_linear_srgb,
        source_illuminant_XYZ,  # Original estimated source illuminant
        XYZ_target_for_CAT,  # Target illuminant with D65 chromaticity but matched to source's luminance
        method=adaptation_method,
        transform="CAT02",
    )

    # 6. Clip and return
    final_image = np.clip(adapted_image_linear_srgb, 0.0, 1.0)
    print("  - Luminance-Preserving Chromatic Adaptation White Balance applied.")
    return final_image


def calculate_and_apply_exposure_correction(final_image_to_save):
    patch_ratio = 0.8

    # --- Target Patch Extraction ---
    h, w, _ = final_image_to_save.shape
    patch_h = int(h * patch_ratio)
    patch_w = int(w * patch_ratio)

    # Calculate top-left corner of the central patch
    start_h = (h - patch_h) // 2
    start_w = (w - patch_w) // 2

    # Extract the patch using numpy slicing
    patch = final_image_to_save[
        start_h : start_h + patch_h, start_w : start_w + patch_w
    ]

    # --- Colorimetric Measurement of the Patch ---
    # Calculate the mean sRGB value of the patch
    avg_rgb_patch = np.mean(patch, axis=(0, 1))
    print(f"  - Average linear sRGB of patch: {np.round(avg_rgb_patch, 4)}")

    # Define our target: in linear space, middle gray is typically around 0.18 (18%).
    # This corresponds to L* ≈ 46.6 in CIELAB space, not 50.
    target_luminance_Y = 0.18  # Standard photographic middle gray

    # For reference, calculate what L* value this corresponds to
    target_lab_L = colour.XYZ_to_Lab(
        np.array([target_luminance_Y, target_luminance_Y, target_luminance_Y])
    )[0]
    print(f"  - Target middle gray L*: {target_lab_L:.1f}")

    # --- Calculating the Scaling Factor ---
    # Convert the average patch sRGB to CIE XYZ
    input_xyz = colour.sRGB_to_XYZ(avg_rgb_patch)
    input_luminance_Y = input_xyz[1]

    print(f"  - Input patch luminance (Y): {input_luminance_Y:.4f}")
    print(f"  - Target middle gray luminance (Y): {target_luminance_Y:.4f}")

    # The scale factor is the ratio of target luminance to input luminance.
    # Avoid division by zero for black patches.
    if input_luminance_Y < EPSILON:
        scale_factor = 1.0  # Cannot correct a black patch, so do nothing
    else:
        scale_factor = target_luminance_Y / input_luminance_Y

    print(f"  - Calculated exposure scale factor: {scale_factor:.4f}")
    ev_change = np.log2(scale_factor)
    print(f"  - Equivalent EV change: {ev_change:+.2f} EV")
    # Clamp scale_factor to an EV change of +/- 1.5
    if ev_change < -1.5:
        scale_factor = 2**-1.5
    elif ev_change > 1.5:
        scale_factor = 2**1.5
    ev_change = np.log2(scale_factor)
    print(
        f"  - Clamped exposure scale factor: {scale_factor:.4f} (EV change: {ev_change:+.2f})"
    )

    # --- Applying the Correction ---
    # Apply the calculated scale factor to the entire image
    final_image_to_save = final_image_to_save * scale_factor

    # Clip the result to the valid [0.0, 1.0] range
    final_image_to_save = np.clip(final_image_to_save, 0.0, 1.0)
    return final_image_to_save


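The p-norm estimator in chromatic_adaptation_white_balance generalizes the classic grey-based methods: p = 1 is Gray World, p → ∞ is White Patch, and p ≈ 6 is the usual Shades of Gray compromise. The per-channel computation above collapses to one line (a standalone restatement for reference, not new behaviour):

# Restatement of the estimator above (illustrative, not from the commit).
import numpy as np

def estimate_illuminant(img: np.ndarray, p: float = 6.0) -> np.ndarray:
    return np.power(np.mean(np.power(img, p), axis=(0, 1)), 1.0 / p)
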
def auto_exposure_and_contrast(
    image: np.ndarray,
    black_point_percentile: float = 0.1,
    white_point_percentile: float = 99.9,
    middle_gray_target: float = 0.18,
    gamma_clamp: tuple = (0.4, 2.5),
) -> np.ndarray:
    """
    Performs a more robust automatic exposure and contrast adjustment.

    This function improves upon simple linear scaling by:
    1. Setting black and white points based on percentiles to enhance contrast
       and remove haze. This is done per-channel to help correct color casts.
    2. Applying a non-linear gamma (power-law) correction to adjust mid-tones,
       which preserves highlight and shadow detail far better than linear scaling.
    3. Using the median luminance for its exposure calculation, which is more
       robust to high-key or low-key images than a simple average.
    4. Clamping the gamma adjustment to prevent extreme, unnatural changes.

    Args:
        image (np.ndarray): The input linear sRGB image with values in [0, 1].
        black_point_percentile (float): The percentile for setting the black point.
        white_point_percentile (float): The percentile for setting the white point.
        middle_gray_target (float): The target linear luminance for the image's median.
        gamma_clamp (tuple): A (min, max) tuple to constrain the gamma correction factor.

    Returns:
        np.ndarray: The adjusted image, clipped to the [0, 1] range.
    """
    print("Applying intelligent exposure and contrast adjustment...")

    # Return immediately if the image is all black to avoid errors.
    if np.all(image < 1e-6):
        print("  - Image is black, skipping adjustment.")
        return image

    # --- 1. Automatic Black & White Point Adjustment (Levels) ---
    # This enhances contrast by stretching the dynamic range of the image.
    # It is performed per-channel to help correct for color casts in the shadows/highlights.
    black_points = np.percentile(image, black_point_percentile, axis=(0, 1))
    white_points = np.percentile(image, white_point_percentile, axis=(0, 1))

    # Ensure black points are not higher than white points to avoid inversion.
    # Add a small epsilon for numerical stability.
    black_points = np.minimum(black_points, white_points - 1e-6)

    print(f"  - Calculated Black Points (R,G,B): {np.round(black_points, 4)}")
    print(f"  - Calculated White Points (R,G,B): {np.round(white_points, 4)}")

    # Apply the levels adjustment to stretch the contrast.
    contrast_adjusted_image = (image - black_points) / (white_points - black_points)
    contrast_adjusted_image = np.clip(contrast_adjusted_image, 0.0, 1.0)
    save_debug_image(contrast_adjusted_image, "12a_contrast_adjusted_image_RGB")


    # --- 2. Gamma-based Exposure Correction (Mid-tone Brightness) ---
    # This adjusts the mid-tones without re-clipping the black/white points.
    # We use a luminance approximation (BT.709) for perceptual relevance.
    luminance = np.einsum("...c, c -> ...", contrast_adjusted_image, np.array([0.2126, 0.7152, 0.0722]))

    # Calculate the median of the luminance. The median is more robust to outliers
    # (like specular highlights or deep shadows) than the mean.
    current_median_lum = np.median(luminance)
    print(f"  - Current median luminance: {current_median_lum:.4f}")
    print(f"  - Target median luminance: {middle_gray_target:.4f}")

    # Avoid division by zero or log(0) for very dark images.
    if current_median_lum < 1e-6:
        print("  - Median luminance is zero, skipping gamma correction.")
        return contrast_adjusted_image

    # Calculate the required gamma correction using the formula: power = log(target) / log(current).
    # This formula finds the power `p` such that `current^p = target`.
    gamma = np.log(middle_gray_target) / np.log(current_median_lum)

    # Clamp the gamma to a reasonable range to prevent extreme adjustments.
    min_gamma, max_gamma = gamma_clamp
    clamped_gamma = np.clip(gamma, min_gamma, max_gamma)
    print(f"  - Calculated Gamma: {gamma:.4f} (Clamped to: {clamped_gamma:.4f})")

    # Apply the gamma correction to the contrast-adjusted image.
    corrected_image = contrast_adjusted_image ** clamped_gamma

    final_image = np.clip(corrected_image, 0.0, 1.0)
    return final_image

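A quick check of the gamma formula used above (illustrative): solving median**g == target for g gives g = ln(target) / ln(median), so a median luminance of 0.05 raised toward 0.18 needs g = ln(0.18)/ln(0.05) ≈ 0.572, a brightening curve since g < 1.

# Illustrative check, not from the commit.
import numpy as np
g = np.log(0.18) / np.log(0.05)
assert np.isclose(0.05 ** g, 0.18)
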
def apply_shades_of_grey_wb(final_image_to_save):
    print("Applying Shades of Gray white balance...")
@@ -1065,81 +1107,6 @@ def apply_shades_of_grey_wb(final_image_to_save):
    final_image_to_save = np.clip(balanced_img, 0.0, 1.0)
    return final_image_to_save


def apply_darktable_color_calibration(final_image_to_save):
    print("Applying DarkTable-style Color Calibration white balance...")

    # --- Illuminant Estimation ---
    # Get original shape and prepare for sampling
    height, width, _ = final_image_to_save.shape

    # Sample the central 80% of the image area to estimate the illuminant.
    # This is more robust than using the whole image, which might have black borders etc.
    area_ratio = 0.8
    scale = np.sqrt(area_ratio)
    sample_width = int(width * scale)
    sample_height = int(height * scale)
    x_offset = (width - sample_width) // 2
    y_offset = (height - sample_height) // 2

    # Extract the central patch for sampling
    patch = final_image_to_save[
        y_offset : y_offset + sample_height, x_offset : x_offset + sample_width
    ]

    # Estimate the source illuminant using the Gray World assumption on the patch.
    # The average color of the scene is assumed to be the illuminant color.
    source_rgb_avg = np.mean(patch, axis=(0, 1))

    # Avoid issues with pure black patches
    if np.all(source_rgb_avg < EPSILON):
        print("  - Patch is black, skipping white balance.")
        return final_image_to_save

    # Convert the average RGB value to CIE XYZ. This represents our source illuminant.
    # We use the sRGB colorspace definition, which assumes a D65 reference white for the RGB values.
    source_illuminant_xyz = colour.RGB_to_XYZ(
        source_rgb_avg,
        colour.models.RGB_COLOURSPACE_sRGB,
        colour.models.RGB_COLOURSPACE_sRGB.whitepoint,
    )

    # --- Chromatic Adaptation ---
    # Define the target illuminant. For sRGB output, this should be D65.
    # Using D65 ensures that neutral colors in the scene are mapped to neutral
    # colors in the final sRGB image.
    target_illuminant_xyz = colour.CCS_ILLUMINANTS[
        "CIE 1931 2 Degree Standard Observer"
    ]["D65"]

    # Ensure target_illuminant_xyz has 3 components (CCS_ILLUMINANTS stores x,y chromaticities)
    if len(target_illuminant_xyz) == 2:
        # Convert xyY to XYZ assuming Y=1
        x, y = target_illuminant_xyz
        X = x / y
        Y = 1.0
        Z = (1 - x - y) / y
        target_illuminant_xyz = np.array([X, Y, Z])

    print(f"  - Source Illuminant (XYZ): {np.round(source_illuminant_xyz, 3)}")
    print(f"  - Target Illuminant (XYZ): {np.round(target_illuminant_xyz, 3)}")

    # Apply chromatic adaptation to the entire image.
    # This transforms the image colors as if the scene was lit by the target illuminant.
    # CAT16 is a modern and accurate Chromatic Adaptation Transform.
    final_image_to_save = colour.adaptation.chromatic_adaptation(
        final_image_to_save,
        source_illuminant_xyz,  # Source illuminant XYZ
        target_illuminant_xyz,  # Target illuminant (D65)
        method="Von Kries",
        transform="CAT16",
    )

    # Clip to valid range and return
    final_image_to_save = np.clip(final_image_to_save, 0.0, 1.0)
    return final_image_to_save


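The xyY-to-XYZ fallback above follows the standard identities X = x/y, Y = 1, Z = (1 - x - y)/y. A worked example (illustrative) with the D65 white point (x, y) = (0.3127, 0.3290):

# Illustrative check, not from the commit.
x, y = 0.3127, 0.3290
X, Y, Z = x / y, 1.0, (1 - x - y) / y
# X ≈ 0.9505, Z ≈ 1.0891: the familiar D65 tristimulus values at Y = 1.
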
def main():
    parser = argparse.ArgumentParser(
        description="Simulate film stock color characteristics using a datasheet JSON."
@@ -1478,7 +1445,7 @@ def main():
            else image_float
        )
        if args.force_d65:
            image_linear = white_balance_to_d65(image_linear)
            print("Only supporting D65 white balance for RAW files.")
    except Exception as e:
        print(f"Error reading input image: {e}", file=sys.stderr)
        sys.exit(1)
@@ -1683,29 +1650,7 @@ def main():
        final_image_to_save = np.clip(inverted_image / max_val, 0.0, 1.0)
        save_debug_image(final_image_to_save, "10_negative_corrected_image_RGB")

    # Apply White Balance Correction
    DO_SHADES_OF_GRAY = True  # Use Shades of Gray algorithm for white balance
    if args.perform_white_balance:
        if not args.perform_negative_correction:
            print(
                "Warning: White balance correction is only effective when using --perform-negative-correction. Ignoring flag."
            )
        else:
            if DO_SHADES_OF_GRAY:
                final_image_to_save = apply_shades_of_grey_wb(final_image_to_save)
                save_debug_image(
                    final_image_to_save, "11_shades_of_gray_corrected_image_RGB"
                )
            else:
                final_image_to_save = apply_darktable_color_calibration(
                    final_image_to_save
                )
                save_debug_image(
                    final_image_to_save,
                    "11_darktable_color_calibration_corrected_image_RGB",
                )

    # Apply Exposure Correction
    if args.perform_exposure_correction:
        print("Applying exposure correction...")
        if not args.perform_negative_correction:
@@ -1714,11 +1659,23 @@ def main():
            )
        else:
            # Define the patch ratio for measurement
            final_image_to_save = calculate_and_apply_exposure_correction(
            final_image_to_save = auto_exposure_and_contrast(
                final_image_to_save
            )
            save_debug_image(final_image_to_save, "12_exposure_corrected_image_RGB")

    # Apply White Balance Correction
    if args.perform_white_balance:
        if not args.perform_negative_correction:
            print(
                "Warning: White balance correction is only effective when using --perform-negative-correction. Ignoring flag."
            )
        else:
            final_image_to_save = chromatic_adaptation_white_balance(final_image_to_save)
            save_debug_image(
                final_image_to_save, "13_chromatic_adaptation_corrected_image_RGB"
            )

    # Apply Tone Curve Correction

    # Apply Film Grain
@@ -2200,6 +2157,10 @@ def main():
    if args.input_image.lower().endswith(".dng"):
        final_image_to_save = final_image_to_save[64:, :, :]


    # Apply Linear to Output Color Space Conversion
    # final_image_to_save = colour.cctf_encoding(final_image_to_save, function="sRGB")

    output_image = (
        np.clip(final_image_to_save, 0.0, 1.0)
        * (65535.0 if args.output_image.lower().endswith((".tiff", ".tif")) else 255.0)
@@ -2209,6 +2170,5 @@ def main():
    iio.imwrite(args.output_image, output_image)
    print("Done.")


if __name__ == "__main__":
    main()