#!/usr/bin/env -S uv run --script
# /// script
# dependencies = [
# "numpy",
# "scipy",
# "Pillow",
# "imageio",
# "rawpy",
# "colour-science",
# ]
# ///
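#
# filmcolor: simulate the color response of a film stock described by a JSON
# datasheet (spectral sensitivities, H&D curves, DIR couplers, halation, etc.).
# Example invocation (paths are illustrative):
#   ./filmcolor input.tiff film_datasheet.json output.tiff --force-d65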
import argparse
import csv
import numpy as np
import imageio.v3 as iio
from scipy.interpolate import interp1d
from scipy.ndimage import (
gaussian_filter,
gaussian_filter1d,
map_coordinates,
) # For LUTs
import rawpy
import os
import sys
import colour
from colour.colorimetry import SDS_ILLUMINANTS
import json
from typing import Optional, List
from pathlib import Path
# --- Configuration ---
EPSILON = 1e-10
### PERFORMANCE ###
# Size of the 3D LUTs. 33 is a common industry size (e.g., in .cube files).
# 17 is faster to generate, 65 is more accurate but much slower to generate.
LUT_SIZE = 65
# --- Data Structures ---
class Info:
name: str
description: str
format_mm: int
version: str
def __init__(
self, name: str, description: str, format_mm: int, version: str
) -> None:
self.name, self.description, self.format_mm, self.version = (
name,
description,
format_mm,
version,
)
class Balance:
r_shift: float
g_shift: float
b_shift: float
def __init__(self, r_shift: float, g_shift: float, b_shift: float) -> None:
self.r_shift, self.g_shift, self.b_shift = r_shift, g_shift, b_shift
class Gamma:
r_factor: float
g_factor: float
b_factor: float
def __init__(self, r_factor: float, g_factor: float, b_factor: float) -> None:
self.r_factor, self.g_factor, self.b_factor = r_factor, g_factor, b_factor
class Processing:
gamma: Gamma
balance: Balance
def __init__(self, gamma: Gamma, balance: Balance) -> None:
self.gamma, self.balance = gamma, balance
class Couplers:
saturation_amount: float
dir_amount_rgb: List[float]
dir_diffusion_um: float
dir_diffusion_interlayer: float
def __init__(
self,
saturation_amount: float,
dir_amount_rgb: List[float],
dir_diffusion_um: float,
dir_diffusion_interlayer: float,
) -> None:
(
self.saturation_amount,
self.dir_amount_rgb,
self.dir_diffusion_um,
self.dir_diffusion_interlayer,
) = (
saturation_amount,
dir_amount_rgb,
dir_diffusion_um,
dir_diffusion_interlayer,
)
class HDCurvePoint:
d: Optional[float]
r: float
g: float
b: float
def __init__(self, d: Optional[float], r: float, g: float, b: float) -> None:
self.d, self.r, self.g, self.b = d, r, g, b
class SpectralSensitivityCurvePoint:
wavelength: float
y: float
m: float
c: float
def __init__(self, wavelength: float, y: float, m: float, c: float) -> None:
self.wavelength, self.y, self.m, self.c = wavelength, y, m, c
class RGBValue:
r: float
g: float
b: float
def __init__(self, r: float, g: float, b: float) -> None:
self.r, self.g, self.b = r, g, b
class Curves:
hd: List[HDCurvePoint]
spectral_sensitivity: List[SpectralSensitivityCurvePoint]
def __init__(
self,
hd: List[HDCurvePoint],
spectral_sensitivity: List[SpectralSensitivityCurvePoint],
) -> None:
self.hd, self.spectral_sensitivity = hd, spectral_sensitivity
class Halation:
strength: RGBValue
size_um: RGBValue
def __init__(self, strength: RGBValue, size_um: RGBValue) -> None:
self.strength, self.size_um = strength, size_um
class Interlayer:
diffusion_um: float
def __init__(self, diffusion_um: float) -> None:
self.diffusion_um = diffusion_um
class Calibration:
iso: int
middle_gray_logE: float
def __init__(self, iso: int, middle_gray_logE: float) -> None:
self.iso, self.middle_gray_logE = iso, middle_gray_logE
class Properties:
halation: Halation
couplers: Couplers
interlayer: Interlayer
curves: Curves
calibration: Calibration
def __init__(
self,
halation: Halation,
couplers: Couplers,
interlayer: Interlayer,
curves: Curves,
calibration: Calibration,
) -> None:
self.halation, self.couplers, self.interlayer, self.curves, self.calibration = (
halation,
couplers,
interlayer,
curves,
calibration,
)
class FilmDatasheet:
info: Info
processing: Processing
properties: Properties
def __init__(
self, info: Info, processing: Processing, properties: Properties
) -> None:
self.info, self.processing, self.properties = info, processing, properties
# --- Datasheet Parsing ---
def parse_datasheet_json(json_filepath) -> FilmDatasheet | None:
    """Parse a film datasheet JSON file into a FilmDatasheet, or None on error."""
if not os.path.exists(json_filepath):
print(f"Error: Datasheet file not found at {json_filepath}", file=sys.stderr)
return None
try:
with open(json_filepath, "r") as jsonfile:
data = json.load(jsonfile)
info = Info(
name=data["info"]["name"],
description=data["info"]["description"],
format_mm=data["info"]["format_mm"],
version=data["info"]["version"],
)
gamma = Gamma(
r_factor=data["processing"]["gamma"]["r_factor"],
g_factor=data["processing"]["gamma"]["g_factor"],
b_factor=data["processing"]["gamma"]["b_factor"],
)
balance = Balance(
r_shift=data["processing"]["balance"]["r_shift"],
g_shift=data["processing"]["balance"]["g_shift"],
b_shift=data["processing"]["balance"]["b_shift"],
)
processing = Processing(gamma=gamma, balance=balance)
halation = Halation(
strength=RGBValue(
r=data["properties"]["halation"]["strength"]["r"],
g=data["properties"]["halation"]["strength"]["g"],
b=data["properties"]["halation"]["strength"]["b"],
),
size_um=RGBValue(
r=data["properties"]["halation"]["size_um"]["r"],
g=data["properties"]["halation"]["size_um"]["g"],
b=data["properties"]["halation"]["size_um"]["b"],
),
)
couplers = Couplers(
saturation_amount=data["properties"]["couplers"]["saturation_amount"],
dir_amount_rgb=data["properties"]["couplers"]["dir_amount_rgb"],
dir_diffusion_um=data["properties"]["couplers"]["dir_diffusion_um"],
dir_diffusion_interlayer=data["properties"]["couplers"][
"dir_diffusion_interlayer"
],
)
interlayer = Interlayer(
diffusion_um=data["properties"]["interlayer"]["diffusion_um"]
)
calibration = Calibration(
iso=data["properties"]["calibration"]["iso"],
middle_gray_logE=data["properties"]["calibration"]["middle_gray_logE"],
)
curves = Curves(
hd=[
HDCurvePoint(d=point["d"], r=point["r"], g=point["g"], b=point["b"])
for point in data["properties"]["curves"]["hd"]
],
spectral_sensitivity=[
SpectralSensitivityCurvePoint(
wavelength=point["wavelength"],
y=point["y"],
m=point["m"],
c=point["c"],
)
for point in data["properties"]["curves"]["spectral_sensitivity"]
],
)
properties = Properties(
calibration=calibration,
halation=halation,
couplers=couplers,
interlayer=interlayer,
curves=curves,
)
return FilmDatasheet(
info=info, processing=processing, properties=properties
)
except Exception as e:
print(f"Error parsing datasheet JSON '{json_filepath}': {e}", file=sys.stderr)
return None
# --- Core Simulation Functions ---
### PERFORMANCE ###
def apply_3d_lut(image: np.ndarray, lut: np.ndarray) -> np.ndarray:
"""
Applies a 3D LUT to an image using trilinear interpolation.
Args:
image: Input image, shape (H, W, 3), values expected in [0, 1].
lut: 3D LUT, shape (N, N, N, 3).
Returns:
Output image, shape (H, W, 3).
"""
lut_dim = lut.shape[0]
# Scale image coordinates to LUT indices
scaled_coords = image * (lut_dim - 1)
# map_coordinates requires coordinates in shape (3, H, W)
coords = np.transpose(scaled_coords, (2, 0, 1))
# Interpolate each output channel
out_r = map_coordinates(lut[..., 0], coords, order=1, mode="nearest")
out_g = map_coordinates(lut[..., 1], coords, order=1, mode="nearest")
out_b = map_coordinates(lut[..., 2], coords, order=1, mode="nearest")
return np.stack([out_r, out_g, out_b], axis=-1)
### PERFORMANCE ###
def create_exposure_lut(
spectral_data: list[SpectralSensitivityCurvePoint],
ref_illuminant_spd,
middle_gray_logE: float,
size: int,
) -> np.ndarray:
"""Generates a 3D LUT for the linear sRGB -> Log Exposure conversion."""
print(f"Generating {size}x{size}x{size} exposure LUT...")
# Create a grid of sRGB input values
grid_points = np.linspace(0.0, 1.0, size)
r, g, b = np.meshgrid(grid_points, grid_points, grid_points, indexing="ij")
# Reshape grid into an "image" for batch processing
srgb_grid = np.stack([r, g, b], axis=-1).reshape(
-1, 3
) # Shape: (size*size*size, 3)
srgb_grid = srgb_grid.reshape(size, size * size, 3) # Make it 2D for the function
# Run the full spectral calculation on this grid
lut_data = calculate_film_log_exposure(
srgb_grid, spectral_data, ref_illuminant_spd, middle_gray_logE, EPSILON
)
# Reshape back into a 3D LUT
return lut_data.reshape(size, size, size, 3)
### PERFORMANCE ###
def create_density_lut(
processing: Processing,
hd_curve: List[HDCurvePoint],
middle_gray_logE: float,
log_exposure_range: tuple,
size: int,
) -> np.ndarray:
"""Generates a 3D LUT for the Log Exposure -> Density conversion."""
print(
f"Generating {size}x{size}x{size} density LUT for range {log_exposure_range}..."
)
le_min, le_max = log_exposure_range
grid_points = np.linspace(le_min, le_max, size)
le_r, le_g, le_b = np.meshgrid(grid_points, grid_points, grid_points, indexing="ij")
log_exposure_grid = np.stack([le_r, le_g, le_b], axis=-1)
lut_data = apply_hd_curves(
log_exposure_grid, processing, hd_curve, middle_gray_logE
)
return lut_data
# --- Core film model: DIR couplers, H&D curves, spatial effects, spectral exposure ---
def compute_inhibitor_matrix(
amount_rgb: List[float], diffusion_interlayer: float
) -> np.ndarray:
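    # Build a 3x3 matrix describing how developed density in each layer (rows)
    # inhibits development in each layer (columns). Interlayer diffusion is
    # modeled by blurring the identity matrix across layers; each row is then
    # renormalized and scaled by that channel's DIR coupler amount.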
matrix = np.eye(3)
if diffusion_interlayer > 0:
matrix = gaussian_filter1d(
matrix, sigma=diffusion_interlayer, axis=0, mode="constant", cval=0
)
matrix = gaussian_filter1d(
matrix, sigma=diffusion_interlayer, axis=1, mode="constant", cval=0
)
row_sums = matrix.sum(axis=1)
row_sums[row_sums == 0] = 1 # Avoid division by zero
matrix = matrix / row_sums[:, np.newaxis]
return matrix * np.array(amount_rgb)[:, np.newaxis]
def compute_uncoupled_hd_curves(
hd_curve_data: List[HDCurvePoint], inhibitor_matrix: np.ndarray
) -> List[HDCurvePoint]:
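    # Estimate the H&D curves with the DIR coupler effect removed by shifting
    # each channel's log-exposure axis by the inhibitor effect (derived here
    # from the green-layer density) and re-sampling the curve.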
log_E_values = np.array([p.d for p in hd_curve_data if p.d is not None])
density_r, density_g, density_b = (
np.array([p.r for p in hd_curve_data]),
np.array([p.g for p in hd_curve_data]),
np.array([p.b for p in hd_curve_data]),
)
source_density_matrix = np.tile(density_g[:, np.newaxis], (1, 3))
inhibitor_effect = source_density_matrix @ inhibitor_matrix
uncoupled_r = np.interp(
log_E_values, log_E_values - inhibitor_effect[:, 0], density_r
)
uncoupled_g = np.interp(
log_E_values, log_E_values - inhibitor_effect[:, 1], density_g
)
uncoupled_b = np.interp(
log_E_values, log_E_values - inhibitor_effect[:, 2], density_b
)
return [
HDCurvePoint(d=log_e, r=uncoupled_r[i], g=uncoupled_g[i], b=uncoupled_b[i])
for i, log_e in enumerate(log_E_values)
]
def apply_dir_coupler_simulation(
log_exposure_rgb,
naive_density_rgb,
inhibitor_matrix,
diffusion_um,
film_format_mm,
image_width_px,
):
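    # DIR couplers release development inhibitors in proportion to density.
    # Blur the "naive" density to model inhibitor diffusion, project it through
    # the inhibitor matrix, and subtract the result from the log exposure.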
diffusion_pixels = um_to_pixels(diffusion_um, image_width_px, film_format_mm)
inhibitor_signal_diffused = (
gaussian_filter(
naive_density_rgb, sigma=(diffusion_pixels, diffusion_pixels, 0)
)
if diffusion_pixels > EPSILON
else naive_density_rgb
)
inhibitor_effect = np.einsum(
"...s, sm -> ...m", inhibitor_signal_diffused, inhibitor_matrix
)
return log_exposure_rgb - inhibitor_effect
def um_to_pixels(sigma_um, image_width_px, film_format_mm):
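    # Convert a physical length in micrometers to pixels, using the film format
    # width (mm) and the image width (px) to derive microns per pixel.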
if film_format_mm <= 0 or image_width_px <= 0:
return 0
microns_per_pixel = (film_format_mm * 1000.0) / image_width_px
return sigma_um / microns_per_pixel
def apply_hd_curves(
log_exposure_rgb,
processing: Processing,
hd_curve: List[HDCurvePoint],
middle_gray_logE: float,
) -> np.ndarray:
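    # Map log exposure to density per channel: apply the per-channel gamma
    # (contrast) about middle gray, interpolate the H&D curve, then add the
    # balance shift and clamp negative densities to zero.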
density_rgb, gamma_factors, balance_shifts = (
np.zeros_like(log_exposure_rgb),
[
processing.gamma.r_factor,
processing.gamma.g_factor,
processing.gamma.b_factor,
],
[
processing.balance.r_shift,
processing.balance.g_shift,
processing.balance.b_shift,
],
)
log_e_points = [p.d for p in hd_curve if p.d is not None]
min_logE, max_logE = log_e_points[0], log_e_points[-1]
for i, channel in enumerate(["R", "G", "B"]):
gamma_factor = gamma_factors[i]
log_exposure_adjusted = middle_gray_logE + (
log_exposure_rgb[..., i] - middle_gray_logE
) / (gamma_factor if abs(gamma_factor) > EPSILON else EPSILON)
y_points = (
[p.r for p in hd_curve]
if channel == "R"
else [p.g for p in hd_curve] if channel == "G" else [p.b for p in hd_curve]
)
interp_func = interp1d(
log_e_points,
y_points,
kind="linear",
bounds_error=False,
fill_value=(y_points[0], y_points[-1]), # type: ignore
)
density_rgb[..., i] = np.maximum(
interp_func(np.clip(log_exposure_adjusted, min_logE, max_logE))
+ balance_shifts[i],
0,
)
return density_rgb
def apply_saturation_rgb(image_linear, saturation_factor):
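    # Scale chroma about each pixel's Rec. 709 luminance; a factor of 1.0 is a no-op.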
if saturation_factor == 1.0:
return image_linear
luminance = np.einsum(
"...c, c -> ...", image_linear, np.array([0.2126, 0.7152, 0.0722])
)
return np.clip(
np.expand_dims(luminance, axis=-1)
+ saturation_factor * (image_linear - np.expand_dims(luminance, axis=-1)),
0.0,
1.0,
)
def white_balance_to_d65(image_linear_srgb: np.ndarray) -> np.ndarray:
"""
Attempts to white balance a linear sRGB image to a D65 illuminant.
This function is for non-RAW images where the white balance is already baked in.
It uses the "Gray World" assumption to estimate the current illuminant.
Args:
image_linear_srgb: A numpy array representing the image in linear sRGB space.
Returns:
A numpy array of the white-balanced image, also in linear sRGB space.
"""
print("Attempting to force D65 white balance using Gray World estimation...")
# 1. Estimate the illuminant of the source image.
# The Gray World assumption states that the average pixel color is the illuminant.
source_illuminant_rgb = np.mean(image_linear_srgb, axis=(0, 1))
# Avoid division by zero if the image is black
if np.all(source_illuminant_rgb < EPSILON):
return image_linear_srgb
# 2. Define the input and output color spaces and illuminants.
# We assume the image is sRGB, which also uses a D65 white point *by definition*.
# However, if the image has a color cast, its *actual* illuminant is not D65.
# We are adapting from the *estimated* illuminant back to the *ideal* D65.
    srgb_cs = colour.models.RGB_COLOURSPACE_sRGB
    # The target illuminant is D65. CCS_ILLUMINANTS stores xy chromaticity
    # coordinates, so convert them to XYZ tristimulus values.
    target_illuminant_xyz = colour.xy_to_XYZ(
        colour.CCS_ILLUMINANTS["CIE 1931 2 Degree Standard Observer"]["D65"]
    )
    # Convert the estimated RGB illuminant to XYZ. The estimate is assumed to be
    # linear sRGB already adapted to the sRGB whitepoint, so no additional
    # chromatic adaptation is applied in this conversion.
    source_illuminant_xyz = colour.RGB_to_XYZ(
        source_illuminant_rgb, srgb_cs, srgb_cs.whitepoint
    )
    # 3. Perform the chromatic adaptation in XYZ space using a Von Kries
    # transform with the CAT02 matrix, then convert back to linear sRGB.
    image_xyz = colour.RGB_to_XYZ(image_linear_srgb, srgb_cs, srgb_cs.whitepoint)
    image_xyz_adapted = colour.adaptation.chromatic_adaptation(
        image_xyz,
        source_illuminant_xyz,
        target_illuminant_xyz,
        method="Von Kries",
        transform="CAT02",
    )
    image_adapted_linear_srgb = colour.XYZ_to_RGB(
        image_xyz_adapted, srgb_cs, srgb_cs.whitepoint
    )
    # 4. Clip to ensure values remain valid.
    return np.clip(image_adapted_linear_srgb, 0.0, 1.0)
def apply_spatial_effects(
image,
film_format_mm,
couplerData: Couplers,
interlayerData: Interlayer,
halationData: Halation,
image_width_px,
):
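    # Two spatial effects: interlayer diffusion (a small Gaussian blur applied to
    # all channels) and halation (a per-channel blend of the image with a wider
    # blur, modeling light scattered back through the film base).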
sigma_pixels_diffusion = um_to_pixels(
interlayerData.diffusion_um, image_width_px, film_format_mm
)
if sigma_pixels_diffusion > EPSILON:
image = gaussian_filter(
image,
sigma=[sigma_pixels_diffusion, sigma_pixels_diffusion, 0],
mode="nearest",
)
halation_applied, blurred_image_halation = False, np.copy(image)
strengths, sizes_um = [
halationData.strength.r,
halationData.strength.g,
halationData.strength.b,
], [halationData.size_um.r, halationData.size_um.g, halationData.size_um.b]
for i in range(3):
sigma_pixels_halation = um_to_pixels(
sizes_um[i], image_width_px, film_format_mm
)
if strengths[i] > EPSILON and sigma_pixels_halation > EPSILON:
halation_applied = True
channel_blurred = gaussian_filter(
image[..., i], sigma=sigma_pixels_halation, mode="nearest"
)
blurred_image_halation[..., i] = (
image[..., i] * (1.0 - strengths[i]) + channel_blurred * strengths[i]
)
return (
np.clip(blurred_image_halation, 0.0, 1.0)
if halation_applied
else np.clip(image, 0.0, 1.0)
)
def calculate_film_log_exposure(
image_linear_srgb,
spectral_data: list[SpectralSensitivityCurvePoint],
ref_illuminant_spd,
middle_gray_logE,
EPSILON,
):
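    # Spectral upsampling: reconstruct a reflectance spectrum per pixel from its
    # sRGB values (Mallett 2019 basis), multiply by the reference illuminant,
    # integrate against the film's spectral sensitivities, and anchor the result
    # so that an 18% gray card lands on the datasheet's middle-gray log exposure.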
common_shape, common_wavelengths = (
colour.SpectralShape(380, 780, 5),
colour.SpectralShape(380, 780, 5).wavelengths,
)
sensitivities = np.stack(
[
interp1d(
[p.wavelength for p in spectral_data],
[p.c for p in spectral_data],
bounds_error=False,
fill_value=0,
)(common_wavelengths),
interp1d(
[p.wavelength for p in spectral_data],
[p.m for p in spectral_data],
bounds_error=False,
fill_value=0,
)(common_wavelengths),
interp1d(
[p.wavelength for p in spectral_data],
[p.y for p in spectral_data],
bounds_error=False,
fill_value=0,
)(common_wavelengths),
],
axis=-1,
)
illuminant_aligned = ref_illuminant_spd.copy().align(common_shape)
mallett_basis_aligned = (
colour.recovery.MSDS_BASIS_FUNCTIONS_sRGB_MALLETT2019.copy().align(common_shape)
)
spectral_reflectance = np.einsum(
"...c, kc -> ...k", image_linear_srgb, mallett_basis_aligned.values
)
light_from_scene = spectral_reflectance * illuminant_aligned.values
film_exposure_values = np.einsum(
"...k, ks -> ...s", light_from_scene, sensitivities
)
gray_reflectance = np.einsum(
"c, kc -> k", np.array([0.184, 0.184, 0.184]), mallett_basis_aligned.values
)
gray_light = gray_reflectance * illuminant_aligned.values
exposure_18_gray_film = np.einsum("k, ks -> s", gray_light, sensitivities)
log_shift = middle_gray_logE - np.log10(exposure_18_gray_film[1] + EPSILON)
return np.log10(film_exposure_values + EPSILON) + log_shift
# --- Main Execution ---
def main():
parser = argparse.ArgumentParser(
description="Simulate film stock color characteristics using a datasheet JSON."
)
parser.add_argument(
"input_image",
help="Path to the input RGB image (e.g., PNG, TIFF). Assumed linear RGB.",
)
parser.add_argument("datasheet_json", help="Path to the film datasheet JSON file.")
parser.add_argument("output_image", help="Path to save the output emulated image.")
parser.add_argument(
"--no-cache", action="store_true", help="Force regeneration of LUTs and do not save them."
)
parser.add_argument(
"--force-d65", action="store_true", help="Force white balance when loading RAW files."
)
args = parser.parse_args()
datasheet: FilmDatasheet | None = parse_datasheet_json(args.datasheet_json)
if datasheet is None:
sys.exit(1)
print(
f"Simulating: {datasheet.info.name} ({datasheet.info.format_mm}mm) (v{datasheet.info.version})\n\t{datasheet.info.description}"
)
# --- LUT Generation and Pre-computation ---
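    # LUTs are cached under ~/.filmsim, keyed by datasheet name, version, and
    # LUT_SIZE, so repeated runs on the same stock skip the expensive spectral math.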
cache_dir = Path.home() / ".filmsim"
# Sanitize datasheet name for use in a filename
safe_name = "".join(c for c in datasheet.info.name if c.isalnum() or c in (' ', '_')).rstrip()
base_filename = f"{safe_name.replace(' ', '_')}_v{datasheet.info.version}_size{LUT_SIZE}"
exposure_lut_path = cache_dir / f"{base_filename}_exposure.cube"
naive_density_lut_path = cache_dir / f"{base_filename}_naive_density.cube"
uncoupled_density_lut_path = cache_dir / f"{base_filename}_uncoupled_density.cube"
user_consent_to_save = None
middle_gray_logE = float(datasheet.properties.calibration.middle_gray_logE)
exposure_lut: Optional[np.ndarray] = None # Initialize exposure_lut
### PERFORMANCE ###
# Try to load exposure LUT from cache first
if not args.no_cache and exposure_lut_path.exists():
try:
print(f"Loading exposure LUT from cache: {exposure_lut_path}")
loaded_obj = colour.read_LUT(str(exposure_lut_path))
if isinstance(loaded_obj, colour.LUT3D):
exposure_lut = loaded_obj.table
print(f"Successfully loaded exposure LUT from {exposure_lut_path}")
else:
# Log a warning and fall through to regeneration
print(f"Warning: Cached exposure LUT {exposure_lut_path} is not a LUT3D (type: {type(loaded_obj)}). Will regenerate.")
except Exception as e:
# Log a warning and fall through to regeneration
print(f"Warning: Error loading exposure LUT from {exposure_lut_path}: {e}. Will regenerate.")
# If LUT wasn't loaded from cache (or cache was disabled, or loading failed), generate it
if exposure_lut is None:
print("Generating new exposure LUT...")
generated_exposure_lut_table = create_exposure_lut(
datasheet.properties.curves.spectral_sensitivity,
SDS_ILLUMINANTS["D65"],
middle_gray_logE,
size=LUT_SIZE,
)
exposure_lut = generated_exposure_lut_table # Assign to the main variable used later
# Save the newly generated LUT if caching is enabled
if not args.no_cache:
if user_consent_to_save is None: # Ask for consent only once per run if needed
response = input(f"Save generated LUTs to {cache_dir} for future use? [Y/n]: ").lower()
user_consent_to_save = response in ('y', 'yes', '')
if user_consent_to_save:
try:
cache_dir.mkdir(parents=True, exist_ok=True)
colour.write_LUT(colour.LUT3D(table=generated_exposure_lut_table, name=f"{base_filename}_exposure"), str(exposure_lut_path))
print(f"Saved exposure LUT to {exposure_lut_path}")
except Exception as e:
print(f"Error saving exposure LUT to {exposure_lut_path}: {e}", file=sys.stderr)
# Ensure exposure_lut is now populated. If not, something went wrong.
if exposure_lut is None:
# This case should ideally not be reached if create_exposure_lut always returns a valid LUT or raises an error.
print("Critical error: Exposure LUT could not be loaded or generated. Exiting.", file=sys.stderr)
sys.exit(1)
# For density LUTs, we need to know the typical range of log exposure values
# We can estimate this from the H&D curve data itself.
log_e_points = [p.d for p in datasheet.properties.curves.hd if p.d is not None]
log_exposure_range = (log_e_points[0], log_e_points[-1])
density_lut_naive: Optional[np.ndarray] = None # Initialize naive density LUT
if not args.no_cache and naive_density_lut_path.exists():
try:
print(f"Loading naive density LUT from cache: {naive_density_lut_path}")
loaded_obj = colour.read_LUT(str(naive_density_lut_path))
if isinstance(loaded_obj, colour.LUT3D):
density_lut_naive = loaded_obj.table
print(f"Successfully loaded naive density LUT from {naive_density_lut_path}")
else:
# Log a warning and fall through to regeneration
print(f"Warning: Cached naive density LUT {naive_density_lut_path} is not a LUT3D (type: {type(loaded_obj)}). Will regenerate.")
except Exception as e:
# Log a warning and fall through to regeneration
print(f"Warning: Error loading naive density LUT from {naive_density_lut_path}: {e}. Will regenerate.")
if density_lut_naive is None:
print("Generating new naive density LUT...")
density_lut_naive = create_density_lut(
datasheet.processing,
datasheet.properties.curves.hd,
middle_gray_logE,
log_exposure_range,
size=LUT_SIZE,
)
if not args.no_cache:
if user_consent_to_save is None:
response = input(f"Save generated naive density LUT to {cache_dir} for future use? [Y/n]: ").lower()
user_consent_to_save = response in ('y', 'yes', '')
if user_consent_to_save:
try:
cache_dir.mkdir(parents=True, exist_ok=True)
colour.write_LUT(
colour.LUT3D(table=density_lut_naive, name=f"{base_filename}_naive_density"),
str(naive_density_lut_path),
)
print(f"Saved naive density LUT to {naive_density_lut_path}")
except Exception as e:
print(f"Error saving naive density LUT to {naive_density_lut_path}: {e}", file=sys.stderr)
if density_lut_naive is None:
# This case should ideally not be reached if create_density_lut always returns a valid LUT or raises an error.
print("Critical error: Naive density LUT could not be loaded or generated. Exiting.", file=sys.stderr)
sys.exit(1)
inhibitor_matrix = compute_inhibitor_matrix(
datasheet.properties.couplers.dir_amount_rgb,
datasheet.properties.couplers.dir_diffusion_interlayer,
)
    density_lut_uncoupled: Optional[np.ndarray] = None  # Initialize uncoupled density LUT
    if not args.no_cache and uncoupled_density_lut_path.exists():
try:
print(f"Loading uncoupled density LUT from cache: {uncoupled_density_lut_path}")
loaded_obj = colour.read_LUT(str(uncoupled_density_lut_path))
if isinstance(loaded_obj, colour.LUT3D):
density_lut_uncoupled = loaded_obj.table
print(f"Successfully loaded uncoupled density LUT from {uncoupled_density_lut_path}")
else:
# Log a warning and fall through to regeneration
print(f"Warning: Cached uncoupled density LUT {uncoupled_density_lut_path} is not a LUT3D (type: {type(loaded_obj)}). Will regenerate.")
except Exception as e:
# Log a warning and fall through to regeneration
print(f"Warning: Error loading uncoupled density LUT from {uncoupled_density_lut_path}: {e}. Will regenerate.")
    if density_lut_uncoupled is None:
uncoupled_hd_curve = compute_uncoupled_hd_curves(
datasheet.properties.curves.hd, inhibitor_matrix
)
density_lut_uncoupled = create_density_lut(
datasheet.processing,
uncoupled_hd_curve,
middle_gray_logE,
log_exposure_range,
size=LUT_SIZE,
)
if not args.no_cache:
if user_consent_to_save is None:
response = input(f"Save generated uncoupled density LUT to {cache_dir} for future use? [Y/n]: ").lower()
user_consent_to_save = response in ('y', 'yes', '')
if user_consent_to_save:
try:
cache_dir.mkdir(parents=True, exist_ok=True)
colour.write_LUT(
colour.LUT3D(table=density_lut_uncoupled, name=f"{base_filename}_uncoupled_density"),
str(uncoupled_density_lut_path),
)
print(f"Saved uncoupled density LUT to {uncoupled_density_lut_path}")
except Exception as e:
print(f"Error saving uncoupled density LUT to {uncoupled_density_lut_path}: {e}", file=sys.stderr)
if density_lut_uncoupled is None:
# This case should ideally not be reached if create_density_lut always returns a valid LUT or raises an error.
print("Critical error: Uncoupled density LUT could not be loaded or generated. Exiting.", file=sys.stderr)
sys.exit(1)
# --- Load and Prepare Input Image ---
try:
if any(
args.input_image.lower().endswith(ext) for ext in [".dng", ".raw", ".arw"]
):
with rawpy.imread(args.input_image) as raw:
                if args.force_d65:
                    # Unity WB multipliers: skip the camera's white balance so the
                    # downstream D65 handling sees unscaled channel data.
                    wb_params = {"user_wb": [1.0, 1.0, 1.0, 1.0]}
                image_raw = raw.postprocess(
                    demosaic_algorithm=rawpy.DemosaicAlgorithm.AHD,  # type: ignore
                    output_bps=16,
                    gamma=(1.0, 1.0),
                    no_auto_bright=True,
                    output_color=rawpy.ColorSpace.sRGB,  # type: ignore
                    **(wb_params if args.force_d65 else {"use_camera_wb": True}),
                )
image_linear = image_raw.astype(np.float64) / 65535.0
print("RAW file processed to linear floating point directly.")
else:
            image_raw = iio.imread(args.input_image)
            if image_raw.dtype == np.uint16:
                image_float = image_raw.astype(np.float64) / 65535.0
            elif image_raw.dtype == np.uint8:
                image_float = image_raw.astype(np.float64) / 255.0
            else:
                # Float inputs are assumed to already be linear in [0, 1].
                image_float = image_raw.astype(np.float64)
            # Integer inputs are assumed to be sRGB-encoded; decode them to linear.
            image_linear = (
                colour.cctf_decoding(image_float, function="sRGB")
                if image_raw.dtype in (np.uint8, np.uint16)
                else image_float
            )
if args.force_d65:
image_linear = white_balance_to_d65(image_linear)
except Exception as e:
print(f"Error reading input image: {e}", file=sys.stderr)
sys.exit(1)
if image_linear.shape[-1] > 3:
image_linear = image_linear[..., :3]
image_linear, image_width_px = np.maximum(image_linear, 0.0), image_linear.shape[1]
# --- Pipeline Steps ---
### PERFORMANCE ###
# 1. Convert Linear RGB to Log Exposure using the LUT
print("Applying exposure LUT...")
log_exposure_rgb = apply_3d_lut(image_linear, exposure_lut)
# 2. Apply DIR Coupler Simulation
print("Applying DIR coupler simulation...")
# 2a. Calculate "naive" density using its LUT
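    # The naive density LUT was built over log_exposure_range, so log exposure is
    # rescaled to [0, 1] before lookup; map_coordinates clamps any values that
    # still fall outside the grid.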
naive_density_rgb = apply_3d_lut(
(log_exposure_rgb - log_exposure_range[0])
/ (log_exposure_range[1] - log_exposure_range[0]),
density_lut_naive,
)
# naive_density_rgb = apply_hd_curves(log_exposure_rgb, datasheet.processing, datasheet.properties.curves.hd, middle_gray_logE) # Apply H&D curves to naive density
# 2b. Use this density to modify log exposure
modified_log_exposure_rgb = apply_dir_coupler_simulation(
log_exposure_rgb,
naive_density_rgb,
inhibitor_matrix,
datasheet.properties.couplers.dir_diffusion_um,
datasheet.info.format_mm,
image_width_px,
)
# 3. Apply the *uncoupled* density LUT to the *modified* exposure
print("Applying uncoupled density LUT...")
# Normalize the modified log exposure to the [0,1] range for LUT lookup
norm_log_exposure = (modified_log_exposure_rgb - log_exposure_range[0]) / (
log_exposure_range[1] - log_exposure_range[0]
)
density_rgb = apply_3d_lut(np.clip(norm_log_exposure, 0, 1), density_lut_uncoupled)
# (Rest of the pipeline is unchanged)
print("Converting density to linear transmittance...")
linear_transmittance = np.clip(10.0 ** (-density_rgb), 0.0, 1.0)
print("Applying spatial effects (diffusion, halation)...")
linear_post_spatial = apply_spatial_effects(
linear_transmittance,
datasheet.info.format_mm,
datasheet.properties.couplers,
datasheet.properties.interlayer,
datasheet.properties.halation,
image_width_px,
)
print("Applying saturation adjustment...")
linear_post_saturation = apply_saturation_rgb(
linear_post_spatial, datasheet.properties.couplers.saturation_amount
)
print("Converting to output format and saving...")
output_image = (
np.clip(linear_post_saturation, 0.0, 1.0)
* (65535.0 if args.output_image.lower().endswith((".tiff", ".tif")) else 255.0)
).astype(
np.uint16 if args.output_image.lower().endswith((".tiff", ".tif")) else np.uint8
)
iio.imwrite(args.output_image, output_image)
print("Done.")
if __name__ == "__main__":
main()