many fixes to color, generally good output now
filmcolor (283 changed lines)
@@ -679,27 +679,32 @@ def calculate_film_log_exposure(
     middle_gray_logE,
     EPSILON,
 ):
+    """
+    Converts linear sRGB values to per-channel log exposure values that the
+    film's layers would receive. This version includes per-channel calibration.
+    """
     common_shape, common_wavelengths = (
         colour.SpectralShape(380, 780, 5),
         colour.SpectralShape(380, 780, 5).wavelengths,
     )
+    # The order here is critical: C, M, Y dye layers are sensitive to R, G, B light respectively.
     sensitivities = np.stack(
         [
             interp1d(
                 [p.wavelength for p in spectral_data],
-                [p.c for p in spectral_data],
+                [p.c for p in spectral_data],  # Cyan layer is Red-sensitive
                 bounds_error=False,
                 fill_value=0,
             )(common_wavelengths),
             interp1d(
                 [p.wavelength for p in spectral_data],
-                [p.m for p in spectral_data],
+                [p.m for p in spectral_data],  # Magenta layer is Green-sensitive
                 bounds_error=False,
                 fill_value=0,
             )(common_wavelengths),
             interp1d(
                 [p.wavelength for p in spectral_data],
-                [p.y for p in spectral_data],
+                [p.y for p in spectral_data],  # Yellow layer is Blue-sensitive
                 bounds_error=False,
                 fill_value=0,
             )(common_wavelengths),
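The stacking order matters because the einsum("k, ks -> s", ...) used further down contracts over wavelength and leaves one exposure per dye layer, in C, M, Y order, which the rest of the pipeline reads as R, G, B exposures. A shape-only sketch of that contraction (illustrative dummy data, not part of the commit):

    import numpy as np

    n_wl = 81                               # 380..780 nm at 5 nm steps
    sensitivities = np.ones((n_wl, 3))      # columns: cyan/red, magenta/green, yellow/blue layers
    gray_light = np.ones(n_wl)              # illuminant x 18% gray reflectance
    exposure = np.einsum("k, ks -> s", gray_light, sensitivities)
    print(exposure.shape)                   # (3,) -> one exposure value per layer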
@@ -722,8 +727,15 @@ def calculate_film_log_exposure(
     )
     gray_light = gray_reflectance * illuminant_aligned.values
     exposure_18_gray_film = np.einsum("k, ks -> s", gray_light, sensitivities)
-    log_shift = middle_gray_logE - np.log10(exposure_18_gray_film[1] + EPSILON)
-    return np.log10(film_exposure_values + EPSILON) + log_shift
+    # --- CORRECTED LOGIC ---
+    # Instead of a single scalar shift based on the green channel, we calculate
+    # a vector of three shifts, one for each channel (R, G, B). This ensures
+    # each layer is independently calibrated against its own response to gray.
+    log_shift_per_channel = middle_gray_logE - np.log10(exposure_18_gray_film + EPSILON)
+
+    # Apply the per-channel shift to the per-channel exposure values.
+    return np.log10(film_exposure_values + EPSILON) + log_shift_per_channel
 
 
 def apply_spatial_effects_new(
     image: np.ndarray,
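The effect of the per-channel shift is easiest to see with numbers. A minimal sketch with made-up values (middle_gray_logE and the gray exposures are assumptions for illustration only): after the shift, an 18% gray patch lands on middle_gray_logE in every channel, whereas the old scalar shift only guaranteed this for green.

    import numpy as np

    EPSILON = 1e-10
    middle_gray_logE = -1.44                                   # assumed value
    exposure_18_gray_film = np.array([0.031, 0.042, 0.027])    # made-up R, G, B gray exposures

    log_shift_per_channel = middle_gray_logE - np.log10(exposure_18_gray_film + EPSILON)
    logE_gray = np.log10(exposure_18_gray_film + EPSILON) + log_shift_per_channel
    print(logE_gray)                                           # [-1.44, -1.44, -1.44]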
@@ -830,6 +842,236 @@ def apply_spatial_effects_new(
 
     return result_numpy
+
+
+# --- Physically-Based Scanner Data ---
+# This data represents a more physical model of scanner components. The spectral
+# data is representative and designed to create physically-plausible results.
+
+# Define a common spectral shape for all our operations
+COMMON_SPECTRAL_SHAPE = colour.SpectralShape(380, 780, 10)
+
+# 1. Scanner Light Source Spectra (SPD - Spectral Power Distribution)
+# Modeled based on known lamp types for these scanners.
+SDS_SCANNER_LIGHT_SOURCES = {
+    # Hasselblad/Flextight: Simulates a Cold Cathode Fluorescent Lamp (CCFL).
+    # Characterized by broad but spiky emission, especially in blue/green.
+    "hasselblad": colour.sd_multi_leds(
+        [450, 540, 610],
+        half_spectral_widths=[20, 30, 25]  # half spectral widths (FWHM / 2)
+    ).align(COMMON_SPECTRAL_SHAPE),
+
+    # Fuji Frontier: Simulates a set of narrow-band, high-intensity LEDs.
+    # This gives the characteristic color separation and vibrancy.
+    "frontier": colour.sd_multi_leds(
+        [465, 535, 625],  # R, G, B LED peaks
+        half_spectral_widths=[20, 25, 20]  # half spectral widths (very narrow)
+    ).align(COMMON_SPECTRAL_SHAPE),
+
+    # Noritsu: Also LED-based, but often with slightly different peaks and
+    # calibration leading to a different color rendering.
+    "noritsu": colour.sd_multi_leds(
+        [460, 545, 630],
+        half_spectral_widths=[22, 28, 22]
+    ).align(COMMON_SPECTRAL_SHAPE),
+}
+
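For readers without the colour-science docs at hand: sd_multi_leds builds a spectral distribution from a few peaked bands. A rough pure-NumPy sketch of the same idea, using Gaussian bands for the "frontier" peaks (the library's exact band shape and normalisation may differ):

    import numpy as np

    wavelengths = np.arange(380, 781, 10)        # matches COMMON_SPECTRAL_SHAPE
    peaks = [465, 535, 625]                       # "frontier" LED peaks
    half_widths = [20, 25, 20]                    # interpreted as FWHM / 2

    spd = np.zeros_like(wavelengths, dtype=float)
    for peak, hw in zip(peaks, half_widths):
        sigma = (2.0 * hw) / 2.355                # FWHM -> Gaussian sigma
        spd += np.exp(-0.5 * ((wavelengths - peak) / sigma) ** 2)
    spd /= spd.max()                              # normalise to unit peak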
+ADVANCED_SCANNER_PRESETS = {
+    "hasselblad": {
+        # Hasselblad/Flextight: Renowned for its neutrality and high fidelity.
+        # Its model aims for a "pure" rendition of the film, close to a perfect observer.
+        "sensitivities": {
+            # Based on a high-quality fluorescent light source and accurate sensor,
+            # approximating the CIE 1931 standard observer for maximum neutrality.
+            "primaries": np.array([
+                [0.7347, 0.2653], [0.2738, 0.7174], [0.1666, 0.0089]
+            ]),
+            "whitepoint": colour.CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D65'],
+        },
+        "tone_curve_params": {
+            "black_point": 0.005, "white_point": 0.998, "contrast": 0.25, "shoulder": 0.1
+        },
+        "color_matrix": np.identity(3),  # No additional color shift; aims for accuracy.
+        "saturation": 1.0,
+        "vibrance": 0.05,  # A very slight boost to color without over-saturating.
+    },
+    "frontier": {
+        # Fuji Frontier: The classic lab scanner look. Famous for its handling of greens and skin tones.
+        "sensitivities": {
+            # Model mimics a narrow-band LED system. The green primary is shifted slightly
+            # towards cyan, and the red primary is shifted slightly towards orange,
+            # contributing to Fuji's signature color rendering, especially in foliage and skin.
+            "primaries": np.array([
+                [0.685, 0.315], [0.250, 0.725], [0.155, 0.045]
+            ]),
+            "whitepoint": colour.CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D65'],
+        },
+        "tone_curve_params": {
+            "black_point": 0.01, "white_point": 0.995, "contrast": 0.40, "shoulder": 0.3
+        },
+        "color_matrix": np.array([
+            [1.0, -0.05, 0.0],
+            [-0.04, 1.0, -0.04],
+            [0.0, 0.05, 1.0]
+        ]),  # The classic Frontier color science matrix from the previous model.
+        "saturation": 1.05,
+        "vibrance": 0.15,  # Higher vibrance gives it that well-known "pop".
+    },
+    "noritsu": {
+        # Noritsu: Known for rich, warm, and often high-contrast scans.
+        "sensitivities": {
+            # Models a different LED array with broader spectral responses. The red primary is wider,
+            # enhancing warmth, and the blue is very strong, creating deep, rich blues in skies.
+            "primaries": np.array([
+                [0.690, 0.310], [0.280, 0.690], [0.150, 0.050]
+            ]),
+            "whitepoint": colour.CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D65'],
+        },
+        "tone_curve_params": {
+            "black_point": 0.015, "white_point": 0.990, "contrast": 0.50, "shoulder": 0.2
+        },
+        "color_matrix": np.array([
+            [1.02, 0.0, -0.02],
+            [-0.02, 1.02, 0.0],
+            [-0.02, -0.02, 1.02]
+        ]),  # Stronger matrix for color separation.
+        "saturation": 1.1,
+        "vibrance": 0.1,  # Boosts saturation, contributing to the rich look.
+    }
+}
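Because the presets are hand-entered, a small structural check catches typos early (wrong matrix shape, inverted levels, missing keys). A sketch that could live in a test file, assuming the dict above is importable; the specific checks are only illustrative:

    import numpy as np

    def check_presets(presets):
        for name, p in presets.items():
            assert np.asarray(p["sensitivities"]["primaries"]).shape == (3, 2), name
            assert np.asarray(p["color_matrix"]).shape == (3, 3), name
            tc = p["tone_curve_params"]
            assert 0.0 <= tc["black_point"] < tc["white_point"] <= 1.0, name

    check_presets(ADVANCED_SCANNER_PRESETS)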
+
+def apply_parametric_s_curve(image, black_point, white_point, contrast, shoulder):
+    """Applies a flexible, non-linear S-curve for contrast."""
+    # 1. Linear re-scale based on black/white points
+    leveled = (image - black_point) / (white_point - black_point)
+    x = np.clip(leveled, 0.0, 1.0)
+
+    # 2. Base smoothstep curve
+    s_curve = 3 * x**2 - 2 * x**3
+
+    # 3. Shoulder compression using a power function
+    # A higher 'shoulder' value compresses highlights more, protecting them.
+    shoulder_curve = x ** (1.0 + shoulder)
+
+    # 4. Blend the curves based on the contrast parameter
+    # A contrast of 0 is linear, 1 is full s-curve + shoulder.
+    final_curve = (
+        x * (1 - contrast) +
+        (s_curve * (1 - shoulder) + shoulder_curve * shoulder) * contrast
+    )
+
+    return np.clip(final_curve, 0.0, 1.0)
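A quick way to see what a given preset's curve does is to evaluate it at a few gray levels. A small usage sketch with the function above in scope and the "frontier" parameters:

    import numpy as np

    samples = np.array([0.05, 0.25, 0.50, 0.75, 0.95])
    out = apply_parametric_s_curve(samples, black_point=0.01, white_point=0.995,
                                   contrast=0.40, shoulder=0.3)
    for x, y in zip(samples, out):
        print(f"{x:.2f} -> {y:.3f}")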
+
+def apply_vibrance(image, amount):
+    """Selectively boosts saturation of less-saturated colors."""
+    if amount == 0:
+        return image
+    # Calculate per-pixel saturation (max(R,G,B) - min(R,G,B))
+    pixel_sat = np.max(image, axis=-1) - np.min(image, axis=-1)
+    # Create a weight map: less saturated pixels get a higher weight.
+    weight = 1.0 - np.clip(pixel_sat * 1.5, 0, 1)  # Multiplier controls falloff
+    # Calculate luminance for blending
+    luminance = np.einsum("...c, c -> ...", image, np.array([0.2126, 0.7152, 0.0722]))
+    luminance = np.expand_dims(luminance, axis=-1)
+    # Create a fully saturated version of the image
+    saturated_version = np.clip(luminance + (image - luminance) * 2.0, 0.0, 1.0)
+    # Expand weight to match image dimensions (H,W) -> (H,W,1)
+    weight = np.expand_dims(weight, axis=-1)
+    # Blend the original with the saturated version based on the weight map and amount
+    vibrant_image = (
+        image * (1 - weight * amount) +
+        saturated_version * (weight * amount)
+    )
+    return np.clip(vibrant_image, 0.0, 1.0)
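The weight map is the core of the effect: an already-vivid pixel gets a weight near 0 and is left alone, while a muted pixel gets a weight near 1 and is pushed toward the doubled-saturation version. A two-pixel sketch, assuming apply_vibrance above is in scope:

    import numpy as np

    pixels = np.array([[[0.90, 0.10, 0.10],      # vivid red: sat 0.8 -> weight 0.0
                        [0.55, 0.50, 0.45]]])    # muted tone: sat 0.1 -> weight 0.85
    print(apply_vibrance(pixels, amount=0.15))
    # the vivid pixel is essentially unchanged; the muted one gains a little saturation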
+
+
+def scan_film(
+    image_to_scan: np.ndarray,
+    film_base_color_linear_rgb: np.ndarray,
+    scanner_type: str,
+) -> np.ndarray:
+    """
+    Simulates the physical scanning process using a spectral sensitivity model
+    and advanced tone/color processing.
+
+    Args:
+        image_to_scan: Linear sRGB data of the transmitted light.
+        film_base_color_linear_rgb: Linear sRGB color of the film base.
+        scanner_type: The scanner model to emulate.
+
+    Returns:
+        A linear sRGB image representing the final scanned positive.
+    """
+    print(f"--- Starting Advanced '{scanner_type}' scanner simulation ---")
+    if scanner_type not in ADVANCED_SCANNER_PRESETS:
+        print(f"Warning: Scanner type '{scanner_type}' not found. Returning original image.")
+        return image_to_scan
+
+    params = ADVANCED_SCANNER_PRESETS[scanner_type]
+    srgb_cs = colour.models.RGB_COLOURSPACE_sRGB
+
+    # 1. Define the Scanner's Native Color Space
+    scanner_cs = colour.RGB_Colourspace(
+        name=f"Scanner - {scanner_type}",
+        primaries=params['sensitivities']['primaries'],
+        whitepoint=params['sensitivities']['whitepoint']
+    )
+    print(" - Step 1: Defined scanner's unique spectral sensitivity model.")
+
+    # 2. Re-render Image into Scanner's Native Space
+    image_in_scanner_space = colour.RGB_to_RGB(
+        image_to_scan, srgb_cs, scanner_cs
+    )
+    base_in_scanner_space = colour.RGB_to_RGB(
+        film_base_color_linear_rgb, srgb_cs, scanner_cs
+    )
+    save_debug_image(np.clip(image_in_scanner_space, 0.0, 1.0), f"10a_scanner_native_capture_{scanner_type}_RGB")
+    print(" - Step 2: Re-rendered image into scanner's native color space.")
+
+    # 3. Film Base Removal and Inversion (in scanner space)
+    masked_removed = image_in_scanner_space / (base_in_scanner_space + EPSILON)
+    inverted_image = 1.0 / (masked_removed + EPSILON)
+    print(" - Step 3: Performed negative inversion in scanner space.")
+
+    # --- FIX: ADD AUTO-EXPOSURE NORMALIZATION ---
+    # This step is crucial. It mimics the scanner setting its white point from the
+    # brightest part of the inverted image (Dmin), scaling the raw inverted data
+    # into a usable range before applying the tone curve.
+    white_point_percentile = 99.9  # Use a high percentile to be robust against outliers
+    white_point = np.percentile(inverted_image, white_point_percentile, axis=(0, 1))
+    # Protect against division by zero if a channel is all black
+    white_point[white_point < EPSILON] = 1.0
+    normalized_image = inverted_image / white_point
+    print(f" - Step 3a: Auto-exposed image (set {white_point_percentile}% white point).")
+    save_debug_image(np.clip(normalized_image, 0.0, 1.0), f"10aa_scanner_normalized_{scanner_type}_RGB")
+
+    # 4. Apply Scanner Tone Curve
+    # The input is now the normalized_image, which is correctly scaled.
+    tone_params = params['tone_curve_params']
+    tone_curved_image = apply_parametric_s_curve(
+        normalized_image, **tone_params
+    )
+    save_debug_image(tone_curved_image, f"10b_scanner_tone_curve_{scanner_type}_RGB")
+    print(f" - Step 4: Applied scanner's tone curve. (Contrast: {tone_params['contrast']})")
+
+    # 5. Apply Scanner Color Science
+    color_corrected_image = np.einsum(
+        'hwj,ij->hwi', tone_curved_image, np.array(params['color_matrix'])
+    )
+    saturated_image = apply_saturation_rgb(color_corrected_image, params['saturation'])
+    final_look_image = apply_vibrance(saturated_image, params['vibrance'])
+    save_debug_image(final_look_image, f"10c_scanner_color_science_{scanner_type}_RGB")
+    print(" - Step 5: Applied color matrix, saturation, and vibrance.")
+
+    # 6. Convert Image back to Standard Linear sRGB
+    final_image_srgb = colour.RGB_to_RGB(
+        final_look_image, scanner_cs, srgb_cs
+    )
+    print(" - Step 6: Converted final image back to standard sRGB linear.")
+    print(f"--- Finished Advanced '{scanner_type}' scanner simulation ---")
+
+    return np.clip(final_image_srgb, 0.0, 1.0)
+
+
 def chromatic_adaptation_white_balance(
     image_linear_srgb: np.ndarray,
     target_illuminant_name: str = "D65",
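A minimal way to exercise the new path outside of main() is to feed a synthetic negative through one preset. This is only a sketch: it assumes the module's globals (EPSILON, save_debug_image, apply_saturation_rgb) are available, as they are when pasted into filmcolor itself, and save_debug_image will write files as a side effect.

    import numpy as np

    rng = np.random.default_rng(0)
    negative = rng.uniform(0.05, 0.60, size=(64, 64, 3))   # fake transmitted-light image
    film_base = np.array([0.55, 0.30, 0.12])                # orange-mask-like base, linear sRGB

    positive = scan_film(negative, film_base, "frontier")
    print(positive.shape, float(positive.min()), float(positive.max()))  # values clipped to [0, 1]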
@@ -1171,6 +1413,15 @@ def main():
         action="store_true",
         help="Simulate monochrome film grain in the output image.",
     )
+    parser.add_argument(
+        "--scanner-type",
+        type=str.lower,
+        choices=["none", "hasselblad", "frontier", "noritsu"],
+        default="none",
+        help="Simulate the color science of a specific scanner during negative conversion. "
+        "Set to 'none' to use the simple inversion. "
+        "Requires --perform-negative-correction."
+    )
     args = parser.parse_args()
 
     datasheet: FilmDatasheet | None = parse_datasheet_json(args.datasheet_json)
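The help text says the flag requires --perform-negative-correction, but nothing enforces that; a small post-parse guard would make the dependency explicit. A possible addition (not in this commit; flag names as used elsewhere in the diff):

    if args.scanner_type != "none" and not args.perform_negative_correction:
        parser.error("--scanner-type requires --perform-negative-correction")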
@@ -1642,13 +1893,21 @@ def main():
 
     # Apply Film Negative Correction if requested
     if args.perform_negative_correction:
-        print("Applying film negative correction...")
-        print("Film base color:", film_base_color_linear_rgb)
-        masked_removed = final_image_to_save / film_base_color_linear_rgb
-        inverted_image = 1.0 / (masked_removed + EPSILON)  # Avoid division by zero
-        max_val = np.percentile(inverted_image, 99.9)
-        final_image_to_save = np.clip(inverted_image / max_val, 0.0, 1.0)
-        save_debug_image(final_image_to_save, "10_negative_corrected_image_RGB")
+        if args.scanner_type != "none":
+            final_image_to_save = scan_film(
+                final_image_to_save,
+                film_base_color_linear_rgb,
+                args.scanner_type
+            )
+            save_debug_image(final_image_to_save, f"10_scanned_image_{args.scanner_type}_RGB")
+        else:
+            print("Applying simple film negative inversion...")
+            print("Film base color:", film_base_color_linear_rgb)
+            masked_removed = final_image_to_save / (film_base_color_linear_rgb + EPSILON)
+            inverted_image = 1.0 / (masked_removed + EPSILON)  # Avoid division by zero
+            max_val = np.percentile(inverted_image, 99.9)
+            final_image_to_save = np.clip(inverted_image / max_val, 0.0, 1.0)
+            save_debug_image(final_image_to_save, "10_negative_corrected_image_RGB")
 
     # Apply Exposure Correction
     if args.perform_exposure_correction:
testbench.py (111 changed lines)
@@ -11,19 +11,34 @@ from functools import partial
 
 # This dictionary maps the desired abbreviation to the full command-line flag.
 # This makes it easy to add or remove flags in the future.
+# Arguments are organized into "oneof" groups to avoid invalid combinations.
 ARGS_MAP = {
     # 'fd': '--force-d65',
     # 'pnc': '--perform-negative-correction',
-    'pwb': '--perform-white-balance',
-    'pec': '--perform-exposure-correction',
+    # 'pwb': '--perform-white-balance',
+    # 'pec': '--perform-exposure-correction',
     # 'rae': '--raw-auto-exposure',
-    'sg': '--simulate-grain',
-    # 'mg': '--mono-grain'
 }
 
+# Groups of mutually exclusive arguments (only one from each group should be used)
+ONEOF_GROUPS = [
+    {
+        'smf': ['--scanner-type', 'frontier'],
+        'smh': ['--scanner-type', 'hasselblad'],
+        'smn': ['--scanner-type', 'noritsu']
+    },
+    {
+        'sg': '--simulate-grain',
+        'mg': '--mono-grain'
+    }
+]
+
 # --- Worker Function for Multiprocessing ---
 
-def run_filmcolor_command(job_info, filmcolor_path):
+def run_filmcolor_command(job_info, filmcolor_path, dry_run=False):
     """
     Executes a single filmcolor command.
     This function is designed to be called by a multiprocessing Pool.
@@ -36,10 +51,18 @@ def run_filmcolor_command(job_info, filmcolor_path):
         datasheet,
         output_file
     ]
-    command.extend(flags)
+
+    # Add all flags to the command
+    for flag in flags:
+        if isinstance(flag, list):
+            command.extend(flag)  # For arguments with values like ['--scanner-type', 'frontier']
+        else:
+            command.append(flag)  # For simple flags like '--simulate-grain'
+
     command_str = " ".join(command)
     print(f"🚀 Starting job: {os.path.basename(output_file)}")
+    if dry_run:
+        return f"🔍 DRY RUN: {command_str} (not executed)"
+
     try:
         # Using subprocess.run to execute the command
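The loop is needed because flags is now a mixed list: plain strings for boolean switches and [flag, value] pairs for the scanner selection. Flattened, a job's command line ends up looking like this (paths are placeholders):

    flags = [["--scanner-type", "frontier"], "--simulate-grain"]
    command = ["./filmcolor", "input.dng", "datasheet.json", "output.tiff"]
    for flag in flags:
        if isinstance(flag, list):
            command.extend(flag)
        else:
            command.append(flag)
    print(" ".join(command))
    # ./filmcolor input.dng datasheet.json output.tiff --scanner-type frontier --simulate-grain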
@@ -94,6 +117,16 @@ def main():
         default=3,
         help="Number of parallel jobs to run. (Default: 3)"
     )
+    parser.add_argument(
+        "--dry-run",
+        action='store_true',
+        help="If set, will only print the commands without executing them."
+    )
+    parser.add_argument(
+        "--refresh",
+        action='store_true',
+        help="If set, will reprocess existing output files. Otherwise, skips files that already exist."
+    )
     args = parser.parse_args()
 
     # 1. Find all input RAW files
@@ -126,15 +159,44 @@ def main():
     print(f" Found {len(datasheet_files)} datasheet files.")
 
     # 3. Generate all argument combinations
-    arg_abbreviations = list(ARGS_MAP.keys())
+    # Get regular standalone arguments
+    standalone_args = list(ARGS_MAP.keys())
+
+    # Generate all possible combinations of regular args
+    standalone_arg_combos = []
+    for i in range(len(standalone_args) + 1):
+        for combo in itertools.combinations(standalone_args, i):
+            standalone_arg_combos.append(sorted(list(combo)))
+
+    # Create all possible combinations with oneof groups
     all_arg_combos = []
-    # Loop from 0 to len(abbreviations) to get combinations of all lengths
-    for i in range(len(arg_abbreviations) + 1):
-        for combo in itertools.combinations(arg_abbreviations, i):
-            all_arg_combos.append(sorted(list(combo)))  # Sort for consistent naming
+
+    # For each oneof group, we need to include either one option or none
+    oneof_options = []
+    for group in ONEOF_GROUPS:
+        # Add a None entry to represent using no option from this group
+        group_options = [None]
+        # Add each option from the group
+        group_options.extend(group.keys())
+        oneof_options.append(group_options)
+
+    # Generate all combinations of oneof options
+    for oneof_combo in itertools.product(*oneof_options):
+        # Filter out None values
+        oneof_combo = [x for x in oneof_combo if x is not None]
+
+        # Combine with standalone args
+        for standalone_combo in standalone_arg_combos:
+            # Combine the two lists and sort for consistent naming
+            combined_combo = sorted(standalone_combo + oneof_combo)
+            all_arg_combos.append(combined_combo)
+
+    # Remove any duplicates
+    all_arg_combos = [list(x) for x in set(map(tuple, all_arg_combos))]
+
     # 4. Create the full list of jobs to run
     jobs_to_run = []
+    skipped_jobs = 0
     for raw_file_path in raw_files:
         input_dir = os.path.dirname(raw_file_path)
         input_filename = os.path.basename(raw_file_path)
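With every standalone abbreviation in ARGS_MAP commented out, the job count is driven entirely by the oneof groups: (3 scanner options + none) x (2 grain options + none) = 12 flag combinations per input/datasheet pair. The count can be checked with a few lines mirroring the logic above:

    import itertools

    oneof_groups = [["smf", "smh", "smn"], ["sg", "mg"]]
    options = [[None] + g for g in oneof_groups]
    combos = {tuple(sorted(x for x in c if x is not None))
              for c in itertools.product(*options)}
    print(len(combos))   # 12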
@@ -153,14 +215,36 @@ def main():
 
             output_path = os.path.join(input_dir, output_name)
+
+            # Skip if file exists and --refresh is not set
+            if os.path.exists(output_path) and not args.refresh:
+                skipped_jobs += 1
+                continue
+
             # Get the full flags from the abbreviations
-            flags = [ARGS_MAP[abbr] for abbr in arg_combo_abbrs] + ['--perform-negative-correction']  # always include this flag
+            flags = []
+            for abbr in arg_combo_abbrs:
+                # Check if this is from a oneof group
+                is_oneof = False
+                for group in ONEOF_GROUPS:
+                    if abbr in group:
+                        flags.append(group[abbr])
+                        is_oneof = True
+                        break
+
+                # If not from a oneof group, use the regular ARGS_MAP
+                if not is_oneof and abbr in ARGS_MAP:
+                    flags.append(ARGS_MAP[abbr])
+
+            # Add required flags
+            flags.extend(['--perform-negative-correction', "--perform-white-balance", '--perform-exposure-correction'])
+
             # Add the complete job description to our list
             jobs_to_run.append((raw_file_path, datasheet_path, output_path, flags))
 
     total_jobs = len(jobs_to_run)
     print(f"\n✨ Generated {total_jobs} total jobs to run.")
+    if skipped_jobs > 0:
+        print(f"⏭️ Skipped {skipped_jobs} existing output files. Use --refresh to reprocess them.")
     if total_jobs == 0:
         print("Nothing to do. Exiting.")
         sys.exit(0)
@@ -175,11 +259,10 @@ def main():
             print("\nAborted by user.")
             sys.exit(0)
 
-
     # 5. Run the jobs in a multiprocessing pool
     print("\n--- Starting Testbench ---\n")
     # `partial` is used to "pre-fill" the filmcolor_path argument of our worker function
-    worker_func = partial(run_filmcolor_command, filmcolor_path=args.filmcolor_path)
+    worker_func = partial(run_filmcolor_command, filmcolor_path=args.filmcolor_path, dry_run=args.dry_run)
 
     with Pool(processes=args.jobs) as pool:
         # imap_unordered is great for this: it yields results as they complete,