v1.1 - Spectral simulation, but very slow
This commit is contained in:
97
filmcolor
97
filmcolor
@ -6,6 +6,7 @@
|
||||
# "Pillow",
|
||||
# "imageio",
|
||||
# "rawpy",
|
||||
# "colour",
|
||||
# ]
|
||||
# ///
|
||||
|
||||
@ -29,6 +30,8 @@ from scipy.ndimage import gaussian_filter
|
||||
import rawpy
|
||||
import os
|
||||
import sys
|
||||
import colour
|
||||
from colour.colorimetry import SDS_ILLUMINANTS
|
||||
import json
|
||||
|
||||
# --- Configuration ---
|
||||
@ -491,7 +494,7 @@ def apply_spatial_effects(
|
||||
|
||||
# --- 2. Apply Halation ---
|
||||
# This simulates light scattering back through the emulsion
|
||||
halation_applied = False
|
||||
halation_applied = True
|
||||
blurred_image_halation = np.copy(
|
||||
image
|
||||
) # Start with potentially diffusion-blurred image
|
||||
@ -535,6 +538,89 @@ def apply_spatial_effects(
|
||||
return image
|
||||
|
||||
|
||||
def calculate_film_log_exposure(image_linear_srgb,  # Assuming input is converted to linear sRGB
                                spectral_data: list[SpectralSensitivityCurvePoint],
                                ref_illuminant_spd,  # e.g., colour.SDS_ILLUMINANTS['D65']
                                middle_gray_logE,
                                EPSILON=1e-10):
    """
    Calculate the effective log exposure for each film layer from spectral sensitivities.

    Film layers map to dye sensitivities as R->cyan ('c'), G->magenta ('m'),
    B->yellow ('y'), and the exposure level is calibrated to the datasheet's
    middle-gray anchor with a single shift so color balance is not distorted.

    Args:
        image_linear_srgb: Array of linear sRGB values, last axis = 3 channels.
        spectral_data: Sampled sensitivity curve points (wavelength, c, m, y).
        ref_illuminant_spd: Reference illuminant SPD (e.g. colour's D65).
        middle_gray_logE: Datasheet log exposure for an 18.4% gray card.
        EPSILON: Small constant added before log10 to avoid log(0).

    Returns:
        Array shaped like the input's leading dimensions + (3,) holding
        per-layer log10 exposure values.
    """
    # --- 1. Define a common spectral shape and align all data ---
    common_shape = colour.SpectralShape(380, 780, 5)
    common_wavelengths = common_shape.wavelengths

    # Hoisted: the sample wavelengths are identical for all three channels,
    # so build the list once instead of once per interp1d call.
    sample_wavelengths = [p.wavelength for p in spectral_data]

    def _resample(channel_values):
        # Resample one sensitivity curve onto the common wavelength grid;
        # outside the sampled range the sensitivity is taken as zero.
        return interp1d(sample_wavelengths, channel_values,
                        bounds_error=False, fill_value=0)(common_wavelengths)

    # Film layer -> dye sensitivity mapping:
    #   R layer -> cyan ('c'), G layer -> magenta ('m'), B layer -> yellow ('y').
    film_spectral_sensitivities = np.stack(
        [
            _resample([p.c for p in spectral_data]),
            _resample([p.m for p in spectral_data]),
            _resample([p.y for p in spectral_data]),
        ],
        axis=-1,
    )
    print(f"Film spectral sensitivities shape: {film_spectral_sensitivities.shape}")

    # Align Reference Illuminant to our common shape
    illuminant_aligned = ref_illuminant_spd.copy().align(common_shape)

    # --- 2. Use Mallett (2019) to get spectral reflectance from sRGB input ---
    # Get the three sRGB spectral primary basis functions
    mallett_basis_sds = colour.recovery.MSDS_BASIS_FUNCTIONS_sRGB_MALLETT2019
    mallett_basis_aligned = mallett_basis_sds.copy().align(common_shape)
    print(f"Mallett basis shape: {mallett_basis_aligned.shape}")

    # Core of Mallett's method: the reflectance spectrum of any sRGB color is a
    # linear combination of the three basis spectra weighted by linear sRGB:
    # S(lambda) = R * Sr(lambda) + G * Sg(lambda) + B * Sb(lambda)
    spectral_reflectance = np.einsum(
        '...c, kc -> ...k',  # c: sRGB channels, k: wavelengths
        image_linear_srgb, mallett_basis_aligned.values
    )
    print(f"Spectral reflectance shape: {spectral_reflectance.shape}")

    # --- 3. Calculate Scene Light and Film Exposure ---
    # Light hitting the film = spectral reflectance * scene illuminant.
    light_from_scene = spectral_reflectance * illuminant_aligned.values
    print(f"Light from scene shape: {light_from_scene.shape}")

    # Exposure on each layer is the integral of scene light * layer sensitivity;
    # the einsum performs the multiply and the summation (integration) in one step.
    film_exposure_values = np.einsum(
        '...k, ks -> ...s',  # k: wavelengths, s: film layers
        light_from_scene, film_spectral_sensitivities
    )
    print(f"Film exposure values shape: {film_exposure_values.shape}")

    # --- 4. Calibrate Exposure Level ---
    # Anchor the exposure to the datasheet's middle_gray_logE: first find the
    # exposure produced by a perfect 18.4% gray card.
    gray_srgb_linear = np.array([0.184, 0.184, 0.184])
    gray_reflectance = np.einsum('c, kc -> k', gray_srgb_linear, mallett_basis_aligned.values)
    gray_light = gray_reflectance * illuminant_aligned.values
    exposure_18_gray_film = np.einsum('k, ks -> s', gray_light, film_spectral_sensitivities)
    print(f"Exposure for 18.4% gray card shape: {exposure_18_gray_film.shape}")

    # The green layer is the usual brightness reference: find the log exposure
    # the gray card actually produced there, then the shift needed to match the
    # datasheet value (e.g. -1.44 for Portra).
    log_exposure_of_gray_on_green_layer = np.log10(exposure_18_gray_film[1] + EPSILON)
    log_shift = middle_gray_logE - log_exposure_of_gray_on_green_layer
    print(f"Log shift to match middle gray: {log_shift:.3f}")

    # --- 5. Final Conversion to Log Exposure ---
    # A single brightness shift applied to all three channels preserves the
    # film's inherent color balance while setting the exposure level.
    log_exposure_rgb = np.log10(film_exposure_values + EPSILON) + log_shift
    print(f"Log exposure RGB shape: {log_exposure_rgb.shape}")

    return log_exposure_rgb
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Simulate film stock color characteristics using a datasheet JSON."
|
||||
@ -650,10 +736,11 @@ def main():
|
||||
print("Converting linear RGB to Log Exposure...")
|
||||
middle_gray_logE = float(datasheet.properties.calibration.middle_gray_logE)
|
||||
# Add epsilon inside log10 to handle pure black pixels
|
||||
log_exposure_rgb = middle_gray_logE + np.log10(image_linear / 0.18 + EPSILON)
|
||||
# Note: Values below 0.18 * 10**(hd_data['LogE'][0] - middle_gray_logE)
|
||||
# or above 0.18 * 10**(hd_data['LogE'][-1] - middle_gray_logE)
|
||||
# will map outside the H&D curve's defined LogE range and rely on clamping/extrapolation.
|
||||
log_exposure_rgb = calculate_film_log_exposure(image_linear,
|
||||
datasheet.properties.curves.spectral_sensitivity,
|
||||
SDS_ILLUMINANTS['D65'], # Use D65 as reference illuminant
|
||||
middle_gray_logE, EPSILON)
|
||||
|
||||
|
||||
# 2. Apply H&D Curves (Tonal Mapping + Balance Shifts + Gamma/Contrast)
|
||||
print("Applying H&D curves...")
|
||||
|
Reference in New Issue
Block a user