#!/usr/bin/env -S uv run --script
# /// script
# dependencies = [
#     "numpy",
#     "scipy",
#     "Pillow",
#     "imageio",
#     "rawpy",
#     "colour-science",
# ]
# ///

import argparse
import sys

import numpy as np
import imageio.v2 as iio
import colour
from scipy.signal import find_peaks, savgol_filter
from scipy.ndimage import sobel
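
# Example invocations (script and file names are hypothetical):
#   ./negative2positive.py scan_neg.tif positive.tif --border
#   ./negative2positive.py scan_neg.png positive.png --no-wb --prominence 8.0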


# --- Color Space Conversion ---

def to_acescg(image, input_colorspace='sRGB'):
    """Converts an image from a specified colorspace to ACEScg."""
    # NOTE: assumes the input scan is display-encoded (e.g. sRGB gamma), so
    # decode its transfer function here and process on linear data downstream.
    return colour.RGB_to_RGB(image,
                             colour.models.RGB_COLOURSPACES[input_colorspace],
                             colour.models.RGB_COLOURSPACES['ACEScg'],
                             apply_cctf_decoding=True)


def from_acescg(image, output_colorspace='sRGB'):
    """Converts an image from ACEScg to a specified colorspace."""
    # Re-encode the destination transfer function for display-referred output.
    return colour.RGB_to_RGB(image,
                             colour.models.RGB_COLOURSPACES['ACEScg'],
                             colour.models.RGB_COLOURSPACES[output_colorspace],
                             apply_cctf_encoding=True)
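
# Conversion sketch (hypothetical values): a mid-gray sRGB pixel is decoded
# to linear light before the gamut transform, so
#   to_acescg(np.array([0.5, 0.5, 0.5]))   # -> approx. [0.21, 0.21, 0.21]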


# --- Image Processing ---

def _analyze_profile(profile, prominence, width):
    """Helper function to find the most prominent peak in a 1D gradient profile."""
    if profile.size == 0:
        return None
    # Smooth the profile to reduce noise. The window must be odd and must not
    # exceed the profile length (savgol_filter rejects longer windows).
    window_length = min(51, (len(profile) - 1) // 2 * 2 + 1)
    if window_length < 5:  # savgol_filter requires window_length > polyorder
        smoothed_profile = profile
    else:
        smoothed_profile = savgol_filter(profile, window_length=window_length, polyorder=2)
    # Find all peaks that stand out from the baseline.
    peaks, properties = find_peaks(smoothed_profile, prominence=prominence, width=width)
    if len(peaks) == 0:
        return None
    # Return the index of the most prominent peak.
    most_prominent_peak_index = np.argmax(properties['prominences'])
    return peaks[most_prominent_peak_index]
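
# Sanity sketch (hypothetical data): a broad gradient bump centred at index 80
# should come back as the dominant peak:
#   profile = 100.0 * np.exp(-((np.arange(200) - 80) / 5.0) ** 2)
#   _analyze_profile(profile, prominence=5.0, width=2)   # -> approx. 80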


def detect_and_crop_border_gradient(image, border_percent=5, prominence=5.0, min_width=2):
    """
    Detects film edges using a directional gradient method, robust to complex borders.
    """
    # 1. Convert to grayscale for gradient analysis
    luminosity_weights = np.array([0.2126, 0.7152, 0.0722])
    image_gray = np.dot(image, luminosity_weights)
    height, width = image_gray.shape

    # 2. Calculate directional gradients once for the entire image
    grad_y = sobel(image_gray, axis=0)  # For horizontal lines (top, bottom)
    grad_x = sobel(image_gray, axis=1)  # For vertical lines (left, right)

    coords = {}
    search_w = int(width * border_percent / 100)
    search_h = int(height * border_percent / 100)

    # 3. Analyze each border
    # Left Edge (Dark -> Light transition => positive grad_x)
    left_profile = np.sum(np.maximum(0, grad_x[:, :search_w]), axis=0)
    left_coord = _analyze_profile(left_profile, prominence, min_width)
    coords['left'] = left_coord if left_coord is not None else 0

    # Right Edge (Light -> Dark transition => negative grad_x). The profile is
    # reversed so index 0 is the outermost column; the peak index is mapped
    # back to image coordinates below.
    right_profile = np.sum(np.maximum(0, -grad_x[:, -search_w:]), axis=0)
    right_coord = _analyze_profile(right_profile[::-1], prominence, min_width)
    coords['right'] = (width - 1 - right_coord) if right_coord is not None else (width - 1)

    # Top Edge (Dark -> Light transition => positive grad_y)
    top_profile = np.sum(np.maximum(0, grad_y[:search_h, :]), axis=1)
    top_coord = _analyze_profile(top_profile, prominence, min_width)
    coords['top'] = top_coord if top_coord is not None else 0

    # Bottom Edge (Light -> Dark transition => negative grad_y), reversed like
    # the right edge.
    bottom_profile = np.sum(np.maximum(0, -grad_y[-search_h:, :]), axis=1)
    bottom_coord = _analyze_profile(bottom_profile[::-1], prominence, min_width)
    coords['bottom'] = (height - 1 - bottom_coord) if bottom_coord is not None else (height - 1)

    l, t, r, b = map(int, [coords['left'], coords['top'], coords['right'], coords['bottom']])

    if not (l < r and t < b):
        print("Warning: Gradient border detection failed. Using full image.", file=sys.stderr)
        film_base_color = np.median(image.reshape(-1, 3), axis=0)
        return image, film_base_color

    print(f"Detected image box: (left, top, right, bottom) = ({l}, {t}, {r}, {b})")

    # 4. Sample film base color from the border regions
    mask = np.zeros(image.shape[:2], dtype=bool)
    mask[t:b+1, l:r+1] = True
    border_pixels = image[~mask]

    if border_pixels.size == 0:
        print("Warning: Border detected, but no pixels to sample. Using image median.", file=sys.stderr)
        film_base_color = np.median(image[t:b+1, l:r+1].reshape(-1, 3), axis=0)
    else:
        film_base_color = np.median(border_pixels.reshape(-1, 3), axis=0)

    # 5. Crop and return
    cropped_image = image[t:b+1, l:r+1]
    return cropped_image, film_base_color
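
# Border-detection sketch (hypothetical frame): a dark border around a bright
# core should be cropped away and sampled as the base color:
#   img = np.full((100, 100, 3), 0.05)
#   img[10:90, 10:90] = 0.8
#   core, base = detect_and_crop_border_gradient(img, border_percent=20)
#   # core.shape -> approx. (80, 80, 3); base -> approx. [0.05, 0.05, 0.05]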


def invert_negative(image, params):
    """
    Inverts a negative image based on RawTherapee's filmnegative module logic.
    """
    ref_in = params['refInput'] + 1e-9  # Add epsilon to avoid division by zero
    ref_out = params['refOutput']

    # Per-channel exponents: the green exponent anchors the other two via ratios.
    rexp = -(params['greenExp'] * params['redRatio'])
    gexp = -params['greenExp']
    bexp = -(params['greenExp'] * params['blueRatio'])

    # Multipliers chosen so that the film base (refInput) maps to refOutput.
    rmult = ref_out[0] / (ref_in[0] ** rexp)
    gmult = ref_out[1] / (ref_in[1] ** gexp)
    bmult = ref_out[2] / (ref_in[2] ** bexp)

    inverted_image = image.copy() + 1e-9

    inverted_image[:, :, 0] = rmult * (inverted_image[:, :, 0] ** rexp)
    inverted_image[:, :, 1] = gmult * (inverted_image[:, :, 1] ** gexp)
    inverted_image[:, :, 2] = bmult * (inverted_image[:, :, 2] ** bexp)

    return np.clip(inverted_image, 0.0, None)
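
# Worked check of the inversion math: for each channel c,
#   out_c = (ref_out_c / ref_in_c**exp_c) * in_c**exp_c
# so a pixel equal to the film base (in_c == ref_in_c) maps exactly to
# ref_out_c, and values darker than the base map brighter than ref_out_c
# because the exponents are negative.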


def negative_auto_exposure(image, target_percentile=50, highlight_percentile=99.5, highlight_preservation=0.85):
    """
    Automatically adjusts the exposure of a negative image to maximize dynamic
    range while preserving highlights.

    Args:
        image: Input negative image in linear RGB space (ACEScg)
        target_percentile: Percentile to target for middle exposure (default: 50)
        highlight_percentile: Percentile to consider as highlights (default: 99.5)
        highlight_preservation: Controls how much to preserve highlights (0-1, default: 0.85)

    Returns:
        Adjusted image with optimized dynamic range
    """
    # Calculate luminance using standard coefficients for linear light
    luminance = 0.2126 * image[:, :, 0] + 0.7152 * image[:, :, 1] + 0.0722 * image[:, :, 2]

    # Analyze histogram
    hist_values = luminance.flatten()

    # Get key luminance values from histogram
    mid_value = np.percentile(hist_values, target_percentile)
    highlight_value = np.percentile(hist_values, highlight_percentile)

    # Target middle gray for optimal exposure (standard for scene-referred linear)
    target_middle = 0.18

    # Calculate exposure factor for middle values
    middle_exposure_factor = target_middle / (mid_value + 1e-9)

    # Calculate highlight protection factor
    highlight_target = 0.9  # Target highlights to be at 90% of range
    highlight_exposure_factor = highlight_target / (highlight_value * middle_exposure_factor + 1e-9)

    # Blend between middle and highlight exposure factor based on preservation setting
    final_exposure_factor = middle_exposure_factor * (1 - highlight_preservation) + \
                            (middle_exposure_factor * highlight_exposure_factor) * highlight_preservation

    # Apply exposure adjustment
    adjusted_image = image * final_exposure_factor

    return np.clip(adjusted_image, 0.0, None)  # Ensure no negative values but allow overflow for highlights
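
# Worked example (hypothetical numbers): mid_value = 0.09 and
# highlight_value = 0.72 give middle_exposure_factor = 0.18 / 0.09 = 2.0 and
# highlight_exposure_factor = 0.9 / (0.72 * 2.0) = 0.625; with
# highlight_preservation = 0.85 the blended factor is
# 2.0 * 0.15 + (2.0 * 0.625) * 0.85 = 1.3625.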


def auto_exposure(image, target_percentile=50, highlight_percentile=99.5, highlight_preservation=0.85):
    """
    Automatically adjusts the exposure of an image to maximize dynamic range
    while preserving highlights.

    Args:
        image: Input image in linear RGB space (ACEScg)
        target_percentile: Percentile to target for middle exposure (default: 50)
        highlight_percentile: Percentile to consider as highlights (default: 99.5)
        highlight_preservation: Controls how much to preserve highlights (0-1, default: 0.85)

    Returns:
        Adjusted image with optimized dynamic range
    """
    # Calculate luminance using standard coefficients for linear light
    luminance = 0.2126 * image[:, :, 0] + 0.7152 * image[:, :, 1] + 0.0722 * image[:, :, 2]

    # Analyze histogram
    hist_values = luminance.flatten()

    # Get key luminance values from histogram
    mid_value = np.percentile(hist_values, target_percentile)
    highlight_value = np.percentile(hist_values, highlight_percentile)

    # Target middle gray for optimal exposure (standard for scene-referred linear)
    target_middle = 0.18

    # Calculate exposure factor for middle values
    middle_exposure_factor = target_middle / (mid_value + 1e-9)

    # Calculate highlight protection factor
    highlight_target = 0.9  # Target highlights to be at 90% of range
    highlight_exposure_factor = highlight_target / (highlight_value * middle_exposure_factor + 1e-9)

    # Blend between middle and highlight exposure factor based on preservation setting
    final_exposure_factor = middle_exposure_factor * (1 - highlight_preservation) + \
                            (middle_exposure_factor * highlight_exposure_factor) * highlight_preservation

    # Apply exposure adjustment
    adjusted_image = image * final_exposure_factor

    # Apply a subtle S-curve for enhanced contrast while preserving highlights.
    # Work in log space for easier manipulation.
    log_image = np.log2(adjusted_image + 1e-9)

    # Apply soft contrast enhancement
    contrast_strength = 0.15
    log_image = log_image * (1 + contrast_strength) - np.mean(log_image) * contrast_strength

    # Convert back to linear space
    enhanced_image = np.power(2, log_image)

    # Ensure no negative values, but allow overflow for highlight processing later
    return np.clip(enhanced_image, 0.0, None)
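
# Note: the log-space contrast step in auto_exposure() is a power curve that
# pivots at the geometric mean G of the adjusted image:
#   enhanced = adjusted**(1 + s) / G**s,   s = contrast_strength
# Pixels at G are unchanged, while contrast increases smoothly around it.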


def auto_exposure_pec(linear_image, **kwargs):
    """
    Implements Practical Exposure Correction (PEC) on a linear image.
    Automatically determines the correction mode based on image brightness.
    """
    params = {
        'K': kwargs.get('K', 3),
        'c_under': kwargs.get('c_under', 1.0),
        'c_over': kwargs.get('c_over', 0.6),
        'target_lum': kwargs.get('target_lum', 0.18)
    }

    # Auto-detect mode
    luminosity_weights = np.array([0.2126, 0.7152, 0.0722])
    mean_luminance = np.mean(np.dot(linear_image, luminosity_weights))

    if mean_luminance < params['target_lum']:
        mode, c = 'underexposure', params['c_under']
        op = np.add
        print(f"Image appears underexposed (mean lum: {mean_luminance:.3f}). Applying PEC in '{mode}' mode.")
    else:
        mode, c = 'overexposure', params['c_over']
        op = np.subtract
        print(f"Image appears overexposed (mean lum: {mean_luminance:.3f}). Applying PEC in '{mode}' mode.")

    y = linear_image.astype(np.float64)
    adversarial_func = lambda z: c * z * (1 - z)
    g_y = op(y, adversarial_func(y))
    x_k = g_y.copy()

    # The PEC iterative scheme (T=1, as recommended)
    for _ in range(params['K']):
        compensation = adversarial_func(x_k)
        x_k = op(g_y, compensation)

    return x_k
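
# The PEC update in equations: with the perturbation f(z) = c * z * (1 - z),
# the scheme starts from g(y) = y +/- f(y) and iterates
#   x_{k+1} = g(y) +/- f(x_k),   k = 0 .. K-1,
# using '+' in underexposure mode and '-' in overexposure mode.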


def _rgb_to_yuv_huo(image_rgb: np.ndarray) -> np.ndarray:
    """Converts RGB to the paper's specific YUV space."""
    # Columns give Y = 0.299R + 0.587G + 0.114B, U = B - Y, V = R - Y.
    matrix = np.array([[0.299, -0.299, 0.701],
                       [0.587, -0.587, -0.587],
                       [0.114, 0.886, -0.114]])
    return image_rgb.astype(np.float64) @ matrix


def _k_function(error: float, a: float, b: float) -> float:
    """Non-linear error weighting function K(x) from Eq. 16."""
    abs_error, sign = np.abs(error), np.sign(error)
    if abs_error >= a:
        return 2.0 * sign
    elif abs_error >= b:
        return 1.0 * sign
    else:
        return 0.0


def white_balance_huo(image_float: np.ndarray, **kwargs):
    """Performs iterative white balance based on the Huo et al. 2006 paper."""
    params = {
        't_threshold': kwargs.get('t_threshold', 0.1321),
        'mu': kwargs.get('mu', 0.0312),
        'a': kwargs.get('a', 0.8 / 255.0),
        'b': kwargs.get('b', 0.15 / 255.0),
        'max_iter': kwargs.get('max_iter', 16),
    }

    gains = np.array([1.0, 1.0, 1.0], dtype=np.float64)

    print("Starting iterative white balance adjustment...")
    for i in range(params['max_iter']):
        balanced_image = np.clip(image_float * gains, 0.0, 1.0)
        yuv_image = _rgb_to_yuv_huo(balanced_image)
        Y, U, V = yuv_image[..., 0], yuv_image[..., 1], yuv_image[..., 2]

        luminance_mask = (Y > 0.1) & (Y < 0.95)
        if not np.any(luminance_mask):
            print(f"Iteration {i+1}: No pixels in valid luminance range. Stopping.")
            break

        # Candidate gray points: low chroma relative to luminance.
        gray_mask_indices = (np.abs(U[luminance_mask]) + np.abs(V[luminance_mask])) / Y[luminance_mask] < params['t_threshold']

        gray_points_U = U[luminance_mask][gray_mask_indices]
        gray_points_V = V[luminance_mask][gray_mask_indices]

        if gray_points_U.size < 100:
            print(f"Iteration {i+1}: Not enough gray points found ({gray_points_U.size}). Stopping.")
            break

        u_mean, v_mean = np.mean(gray_points_U), np.mean(gray_points_V)

        if np.abs(u_mean) < params['b'] and np.abs(v_mean) < params['b']:
            print(f"Iteration {i+1}: Converged. u_mean={u_mean:.4f}, v_mean={v_mean:.4f}")
            break

        # Adjust the gain of the channel with the larger residual cast:
        # U tracks blue, V tracks red.
        error, channel_idx, channel_name = (-u_mean, 2, "B") if np.abs(u_mean) > np.abs(v_mean) else (-v_mean, 0, "R")
        adjustment = params['mu'] * _k_function(error, params['a'], params['b'])
        gains[channel_idx] += adjustment
        print(f"Iter {i+1:2d}: Adjusting {channel_name}-gain. u_mean={u_mean:.4f}, v_mean={v_mean:.4f}, Adj={adjustment:+.4f}")

    print(f"Final gains: R={gains[0]:.4f}, G={gains[1]:.4f}, B={gains[2]:.4f}")
    return image_float * gains
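
# One-iteration sketch (hypothetical numbers): u_mean = -0.02, v_mean = 0.005
# -> |u_mean| > |v_mean|, so the blue channel is corrected with error = +0.02.
# Since 0.02 >= a (0.8/255 ~ 0.0031), _k_function returns +2.0 and the blue
# gain grows by mu * 2 = 0.0624.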


def white_balance_gray_world(image):
    """
    Performs white balancing using the Gray World assumption.
    """
    r_avg, g_avg, b_avg = np.mean(image, axis=(0, 1))
    avg_lum = (r_avg + g_avg + b_avg) / 3.0

    r_gain = avg_lum / (r_avg + 1e-9)
    g_gain = avg_lum / (g_avg + 1e-9)
    b_gain = avg_lum / (b_avg + 1e-9)

    wb_image = image.copy()
    wb_image[:, :, 0] *= r_gain
    wb_image[:, :, 1] *= g_gain
    wb_image[:, :, 2] *= b_gain

    return wb_image
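
# Gray-world sketch (hypothetical averages): r_avg = 0.30, g_avg = 0.20,
# b_avg = 0.10 give avg_lum = 0.20 and gains of (0.667, 1.000, 2.000),
# which equalizes the per-channel means.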


# --- Main Execution ---

def main():
    parser = argparse.ArgumentParser(
        description="Converts a film negative to a positive image. Requires numpy, imageio, and colour-science.",
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument('input_image', help="Path to the input negative image file (e.g., TIFF, PNG, JPG).")
    parser.add_argument('output_image', help="Path to save the output positive image file.")
    parser.add_argument('--border', action='store_true', help="Indicates the image has a film border to sample the base color from.")
    parser.add_argument('--no-crop', dest='crop', action='store_false', help="Disables cropping of the film border when --border is used.")
    parser.add_argument('--no-wb', dest='white_balance', action='store_false', help="Disables the automatic white balance step.")
    parser.add_argument('--no-auto-exposure', dest='auto_exposure', action='store_false', help="Disables the automatic exposure adjustment step.")
    parser.add_argument('--prominence', type=float, default=5.0, help="[Border] Peak prominence for edge detection.")
    parser.add_argument('--awb-t', type=float, default=0.1321, help="[AWB] Gray point detection threshold `T`.")
    parser.add_argument('--awb-mu', type=float, default=0.0312, help="[AWB] Adjustment step size `mu`.")
    parser.add_argument('--pec-k', type=int, default=3, help="[Exposure] Number of inner loop iterations K.")
    parser.add_argument('--pec-target-lum', type=float, default=0.18, help="[Exposure] Target middle gray for auto mode detection.")

    args = parser.parse_args()

    # 1. Load Image
    try:
        image_raw = iio.imread(args.input_image)
    except FileNotFoundError:
        print(f"Error: Input file not found at {args.input_image}", file=sys.stderr)
        return

    # Convert to float (0.0 to 1.0) for processing
    if image_raw.dtype == np.uint16:
        image_fp = image_raw.astype(np.float32) / 65535.0
    elif image_raw.dtype == np.uint8:
        image_fp = image_raw.astype(np.float32) / 255.0
    else:  # Handle other types like float
        image_fp = image_raw.astype(np.float32)
        if image_fp.max() > 1.0:
            image_fp /= image_fp.max()

    # Handle grayscale and alpha channels using numpy
    if image_fp.ndim == 2:
        print("Input is grayscale, converting to RGB.", file=sys.stderr)
        image_fp = np.stack((image_fp,) * 3, axis=-1)
    if image_fp.shape[2] == 4:
        print("Input has alpha channel, removing it for processing.", file=sys.stderr)
        image_fp = image_fp[:, :, :3]

    # 2. Detect the film border (optional), estimate the film base color,
    #    and convert to the ACEScg working space
    image_to_process = None
    if args.border:
        print("Using gradient border detection...")
        cropped_image, film_base_color = detect_and_crop_border_gradient(image_fp, prominence=args.prominence)
        if args.crop:
            print("Cropping border...")
            image_fp = cropped_image
        print("Converting to ACEScg...")
        image_aces = to_acescg(image_fp, 'sRGB')
        image_to_process = image_aces
        # The base color was sampled before conversion, so bring it into
        # ACEScg to match the image it will be compared against.
        film_base_color = to_acescg(film_base_color, 'sRGB')
    else:
        print("No border specified, using image median for base color...")
        print("Converting to ACEScg...")
        image_aces = to_acescg(image_fp, 'sRGB')
        image_to_process = image_aces
        h, w, _ = image_to_process.shape
        center_crop = image_to_process[h//4:3*h//4, w//4:3*w//4, :]
        film_base_color = np.median(center_crop.reshape(-1, 3), axis=0)

    print(f"Detected film base color (ACEScg): {film_base_color}")

    # 3. Invert Negative
    print("Inverting negative...")
    inversion_params = {
        'greenExp': 1.5,
        'redRatio': 2.04 / 1.5,
        'blueRatio': 1.29 / 1.5,
        'refInput': film_base_color,
        'refOutput': np.array([0.05, 0.05, 0.05])
    }

    positive_image = invert_negative(image_to_process, inversion_params)

    # 4. Auto Exposure Adjustment
    if args.auto_exposure:
        print("Applying automatic exposure adjustment...")
        positive_image = auto_exposure_pec(positive_image, K=args.pec_k, target_lum=args.pec_target_lum)

    # 5. White Balance (uses the Huo et al. iterative AWB so that the --awb-t
    #    and --awb-mu options take effect)
    if args.white_balance:
        print("Applying white balance...")
        awb_params = {'t_threshold': args.awb_t, 'mu': args.awb_mu}
        positive_image = white_balance_huo(positive_image, **awb_params)

    # 6. Convert back from ACEScg to sRGB
    print("Converting from ACEScg to sRGB for output...")
    output_image_srgb = from_acescg(positive_image, 'sRGB')
    output_image_srgb = np.clip(output_image_srgb, 0.0, 1.0)

    # 7. Save to file
    output_extension = args.output_image.lower().split('.')[-1]
    if output_extension in ['tif', 'tiff']:
        print("Saving as 16-bit TIFF.")
        final_image = (output_image_srgb * 65535.0).astype(np.uint16)
    else:
        print("Saving as 8-bit image.")
        final_image = (output_image_srgb * 255.0).astype(np.uint8)

    iio.imwrite(args.output_image, final_image)
    print(f"Successfully saved positive image to {args.output_image}")


if __name__ == "__main__":
    main()