From bb253155fbfb15d33eea9888c5a43dc44568f5d8 Mon Sep 17 00:00:00 2001 From: Tanishq Dubey Date: Thu, 19 Jun 2025 15:31:45 -0400 Subject: [PATCH] huge changes --- border.py | 160 +++++ compare.py | 163 +++++ debug_stitch.py | 147 ++++ filmcolor | 1440 +++++++++++++++++++++++++++++++++++--- filmgrain | 77 +- filmscan | 28 +- filmscanv2 | 466 ++++++++++++ filmscanv3.py | 242 +++++++ hdrtest.py | 36 + poster.py | 138 ++++ posterv2.py | 167 +++++ pyproject.toml | 33 +- sim_data/ektar_100.json | 2 +- sim_data/gold_1000.json | 394 +++++++++++ sim_data/portra_400.json | 2 +- testbench.py | 194 +++++ uv.lock | 441 +++++++++++- wb.py | 242 +++++++ 18 files changed, 4243 insertions(+), 129 deletions(-) create mode 100644 border.py create mode 100644 compare.py create mode 100644 debug_stitch.py create mode 100755 filmscanv2 create mode 100755 filmscanv3.py create mode 100644 hdrtest.py create mode 100644 poster.py create mode 100644 posterv2.py create mode 100644 sim_data/gold_1000.json create mode 100644 testbench.py create mode 100644 wb.py diff --git a/border.py b/border.py new file mode 100644 index 0000000..ba7a040 --- /dev/null +++ b/border.py @@ -0,0 +1,160 @@ +import numpy as np +from PIL import Image, ImageDraw +from scipy.signal import find_peaks, savgol_filter +from scipy.ndimage import sobel + +def analyze_profile( + profile: np.ndarray, + prominence: float, + width: int, + direction: str +) -> int | None: + """ + Analyzes a 1D profile to find the most likely edge coordinate. + """ + if profile.size == 0: + return None + + # 1. Smooth the profile to reduce noise while preserving peak shapes. + # The window length must be odd. + window_length = min(21, len(profile) // 2 * 2 + 1) + if window_length < 5: # savgol_filter requires window_length > polyorder + smoothed_profile = profile + else: + smoothed_profile = savgol_filter(profile, window_length=window_length, polyorder=2) + + # 2. Find all significant peaks in the profile. + # Prominence is a measure of how much a peak stands out from the baseline. + peaks, properties = find_peaks(smoothed_profile, prominence=prominence, width=width) + + if len(peaks) == 0: + return None + + # 3. Select the best peak. We choose the one with the highest prominence. + most_prominent_peak_index = np.argmax(properties['prominences']) + best_peak = peaks[most_prominent_peak_index] + + return best_peak + +def find_film_edges_gradient( + image_path: str, + border_percent: int = 15, + prominence: float = 10.0, + min_width: int = 2 +) -> tuple[int, int, int, int] | None: + """ + Detects film edges using a directional gradient method, which is robust + to complex borders and internal image features. + + Args: + image_path (str): Path to the image file. + border_percent (int): The percentage of image dimensions to search for a border. + prominence (float): Required prominence of a gradient peak to be considered an edge. + This is the most critical tuning parameter. Higher values mean + the edge must be sharper and more distinct. + min_width (int): The minimum width (in pixels) of a peak to be considered. + Helps ignore single-pixel noise. + + Returns: + A tuple (left, top, right, bottom) or None if detection fails. + """ + try: + with Image.open(image_path) as img: + image_gray = np.array(img.convert('L'), dtype=float) + height, width = image_gray.shape + except Exception as e: + print(f"Error opening or processing image: {e}") + return None + + # 1. Calculate directional gradients for the entire image once. 
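+    # (Descriptive note: scipy's sobel(axis=0) differentiates along the rows,
+    # so its peaks mark horizontal edges; sobel(axis=1) differentiates along
+    # the columns and marks vertical edges. Summing only the positive or only
+    # the negative gradients in each search band isolates dark-to-light vs.
+    # light-to-dark transitions, which is what distinguishes the four borders.)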
+ grad_y = sobel(image_gray, axis=0) # For horizontal lines (top, bottom) + grad_x = sobel(image_gray, axis=1) # For vertical lines (left, right) + + coords = {} + search_w = int(width * border_percent / 100) + search_h = int(height * border_percent / 100) + + # 2. Find Left Edge (Dark -> Light transition, so positive grad_x) + left_band_grad = grad_x[:, :search_w] + # We only care about positive gradients (dark to light) + left_profile = np.sum(np.maximum(0, left_band_grad), axis=0) + left_coord = analyze_profile(left_profile, prominence, min_width, "left") + coords['left'] = left_coord if left_coord is not None else 0 + + # 3. Find Right Edge (Light -> Dark transition, so negative grad_x) + right_band_grad = grad_x[:, -search_w:] + # We want the strongest negative gradient, so we flip the sign. + right_profile = np.sum(np.maximum(0, -right_band_grad), axis=0) + # The profile is from right-to-left, so we analyze its reversed version. + right_coord = analyze_profile(right_profile[::-1], prominence, min_width, "right") + if right_coord is not None: + # Convert coordinate back to the original image space + coords['right'] = width - 1 - right_coord + else: + coords['right'] = width - 1 + + # 4. Find Top Edge (Dark -> Light transition, so positive grad_y) + top_band_grad = grad_y[:search_h, :] + top_profile = np.sum(np.maximum(0, top_band_grad), axis=1) + top_coord = analyze_profile(top_profile, prominence, min_width, "top") + coords['top'] = top_coord if top_coord is not None else 0 + + # 5. Find Bottom Edge (Light -> Dark transition, so negative grad_y) + bottom_band_grad = grad_y[-search_h:, :] + bottom_profile = np.sum(np.maximum(0, -bottom_band_grad), axis=1) + bottom_coord = analyze_profile(bottom_profile[::-1], prominence, min_width, "bottom") + if bottom_coord is not None: + coords['bottom'] = height - 1 - bottom_coord + else: + coords['bottom'] = height - 1 + + # 6. Sanity check and return + left, top, right, bottom = map(int, [coords['left'], coords['top'], coords['right'], coords['bottom']]) + if not (left < right and top < bottom): + print("Warning: Detection failed, coordinates are illogical.") + return None + + return (left, top, right, bottom) + + +# --- Example Usage --- +if __name__ == "__main__": + # Use the image you provided. + # NOTE: You must save your image as 'test_negative_v2.png' in the same + # directory as the script, or change the path here. + image_file = "test_negative_v2.png" + + print(f"Attempting to detect edges on '{image_file}' with gradient method...") + + # Run the detection. The 'prominence' parameter is the one to tune. + # Start with a moderate value and increase if it detects noise, decrease + # if it misses a subtle edge. 
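+    # A minimal tuning sweep (a sketch; the prominence values are illustrative,
+    # not calibrated for any particular scan) might look like:
+    #
+    #   for prom in (2.5, 5.0, 10.0, 20.0):
+    #       print(prom, find_film_edges_gradient(image_file, prominence=prom))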
+ crop_box = find_film_edges_gradient( + image_file, + border_percent=15, # Increase search area slightly due to complex borders + prominence=5.0, # A higher value is needed for this high-contrast example + min_width=2 + ) + + if crop_box: + print(f"\nDetected image box: {crop_box}") + + with Image.open(image_file) as img: + # Add a white border for better visualization if the background is black + img_with_border = Image.new('RGB', (img.width + 2, img.height + 2), 'white') + img_with_border.paste(img, (1, 1)) + + draw = ImageDraw.Draw(img_with_border) + # Offset the crop box by 1 pixel due to the added border + offset_box = [c + 1 for c in crop_box] + draw.rectangle(offset_box, outline="red", width=2) + + output_path = "test_negative_detected_final.png" + img_with_border.save(output_path) + print(f"Saved visualization to '{output_path}'") + try: + img_with_border.show(title="Detection Result") + except Exception: + pass + else: + print("\nCould not robustly detect the film edges.") \ No newline at end of file diff --git a/compare.py b/compare.py new file mode 100644 index 0000000..780a0c4 --- /dev/null +++ b/compare.py @@ -0,0 +1,163 @@ +import os +from PIL import Image, ImageDraw, ImageFont +import matplotlib.pyplot as plt +import numpy as np + +def create_advanced_comparison_poster( + image_paths, + output_path="05.jpg", + patch_size=(300, 300), + zoom_level=2.0 +): + """ + Generates a poster optimized for side-by-side patch comparison from a + series of high-resolution images. + + The layout is organized in rows: + - Row 1: Scaled-down full images. + - Row 2: Patch 1 from all images. + - Row 3: Patch 2 from all images. + - ... and so on. + - Final Row: Histograms for all images. + + Args: + image_paths (list): A list of file paths for the images to compare. + output_path (str, optional): Path to save the output poster. + patch_size (tuple, optional): The (width, height) of the area to crop + from the source image. + zoom_level (float, optional): The factor to enlarge the cropped patches. 
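+
+    Example (illustrative; the file names are hypothetical):
+        create_advanced_comparison_poster(
+            ["scan_a.jpg", "scan_b.jpg"],
+            output_path="comparison.jpg",
+            patch_size=(500, 500),
+            zoom_level=2.0,
+        )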
+ """ + if not image_paths: + print("No image paths were provided.") + return + + # --- Layout & Font Configuration --- + padding = 25 + header_height = 40 + row_title_width = 150 + histogram_height = 200 + patch_display_size = (int(patch_size[0] * zoom_level), int(patch_size[1] * zoom_level)) + scaled_full_image_width = patch_display_size[0] + + try: + title_font = ImageFont.truetype("arialbd.ttf", 20) + header_font = ImageFont.truetype("arial.ttf", 16) + except IOError: + title_font = ImageFont.load_default() + header_font = ImageFont.load_default() + + # --- Determine Poster Dimensions from Master Image --- + with Image.open(image_paths[0]) as master_image: + master_width, master_height = master_image.size + # Define patch locations relative to image dimensions + patch_definitions = { + "Top Left": (0, 0, patch_size[0], patch_size[1]), + "Top Right": (master_width - patch_size[0], 0, master_width, patch_size[1]), + "Center": ( + (master_width - patch_size[0]) // 2, + (master_height - patch_size[1]) // 2, + (master_width + patch_size[0]) // 2, + (master_height + patch_size[1]) // 2, + ), + "Bottom Left": (0, master_height - patch_size[1], patch_size[0], master_height), + "Bottom Right": ( + master_width - patch_size[0], + master_height - patch_size[1], + master_width, + master_height, + ), + } + scaled_full_image_height = int(master_height * (scaled_full_image_width / master_width)) + + num_images = len(image_paths) + num_patch_rows = len(patch_definitions) + + # Calculate final poster dimensions + poster_width = row_title_width + num_images * (patch_display_size[0] + padding) + padding + total_rows_height = header_height + scaled_full_image_height + num_patch_rows * patch_display_size[1] + histogram_height + total_padding_height = (3 + num_patch_rows) * padding + poster_height = total_rows_height + total_padding_height + + # --- Create Poster Canvas --- + poster = Image.new("RGB", (poster_width, poster_height), "white") + draw = ImageDraw.Draw(poster) + + # --- 1. Draw Column Headers (Filenames) --- + y_offset = padding + for i, image_path in enumerate(image_paths): + filename = os.path.basename(image_path) + x_offset = row_title_width + i * (patch_display_size[0] + padding) + draw.text((x_offset, y_offset), filename, fill="black", font=header_font) + y_offset += header_height + + # --- 2. Draw Row 1: Scaled Full Images --- + draw.text((padding, y_offset + scaled_full_image_height // 2), "Full View", fill="black", font=title_font) + for i, image_path in enumerate(image_paths): + with Image.open(image_path) as img: + img.thumbnail((scaled_full_image_width, scaled_full_image_height)) + x_offset = row_title_width + i * (patch_display_size[0] + padding) + poster.paste(img, (x_offset, y_offset)) + y_offset += scaled_full_image_height + padding + + # --- 3. Draw Patch Rows --- + for patch_name, patch_area in patch_definitions.items(): + draw.text((padding, y_offset + patch_display_size[1] // 2), patch_name, fill="black", font=title_font) + for i, image_path in enumerate(image_paths): + with Image.open(image_path) as img: + patch = img.crop(patch_area) + zoomed_patch = patch.resize(patch_display_size, Image.Resampling.LANCZOS) + x_offset = row_title_width + i * (patch_display_size[0] + padding) + poster.paste(zoomed_patch, (x_offset, y_offset)) + # Add a border for clarity + draw.rectangle( + (x_offset, y_offset, x_offset + patch_display_size[0], y_offset + patch_display_size[1]), + outline="gray", width=1 + ) + y_offset += patch_display_size[1] + padding + + # --- 4. 
Draw Final Row: Histograms --- + draw.text((padding, y_offset + histogram_height // 2), "Histogram", fill="black", font=title_font) + for i, image_path in enumerate(image_paths): + histogram_path = f"temp_hist_{i}.png" + with Image.open(image_path) as img: + luminance_data = np.array(img.convert("L")) + + plt.figure(figsize=(6, 3)) + plt.hist(luminance_data.ravel(), bins=256, range=[0, 256], color='gray', ec='gray') + plt.title("Luminance") + plt.xlabel("Pixel Intensity") + plt.ylabel("Frequency") + plt.tight_layout() + plt.savefig(histogram_path) + plt.close() + + with Image.open(histogram_path) as hist_img: + hist_img.thumbnail((patch_display_size[0], histogram_height)) + x_offset = row_title_width + i * (patch_display_size[0] + padding) + poster.paste(hist_img, (x_offset, y_offset)) + os.remove(histogram_path) + + # --- Save Final Poster --- + poster.save(output_path) + print(f"Advanced comparison poster saved to {output_path}") + + +if __name__ == '__main__': + # --- Example Usage --- + # This block creates a set of dummy high-resolution images to demonstrate the script. + + test_dir = "high_res_test_images" + if not os.path.exists(test_dir): + os.makedirs(test_dir) + + # Using 4000x3000 as a stand-in for "high resolution" to keep the example fast. + # The script logic works identically for 50MP+ images. + width, height = 4000, 3000 + # list .jpg in dir + jpgdir = '/home/dubey/projects/filmsim/test_images/v1.4/05.DNG/' + image_files = [os.path.join(jpgdir, f) for f in os.listdir(jpgdir) if f.endswith('.jpg')] + + + # --- Generate the poster --- + # For high-res images, a larger patch size from the source is better. + create_advanced_comparison_poster(image_files, patch_size=(1000, 1000), zoom_level=2.5) \ No newline at end of file diff --git a/debug_stitch.py b/debug_stitch.py new file mode 100644 index 0000000..9875199 --- /dev/null +++ b/debug_stitch.py @@ -0,0 +1,147 @@ +import argparse +import re +from pathlib import Path +from PIL import Image, ImageDraw, ImageFont + +# --- Configuration --- +PADDING = 40 +FONT_SIZE = 48 +FONT_COLOR = "black" +ARROW_COLOR = "black" +BACKGROUND_COLOR = "white" +ARROW_WIDTH_RATIO = 0.3 +ARROW_HEIGHT_RATIO = 0.1 + +def parse_filename(filepath: Path): + """Extracts the step number and name from a filename.""" + # Pattern for standard steps like '..._02_log_exposure_RGB.jpg' + match = re.search(r'_(\d+)_([a-zA-Z0-9_]+?)_RGB\.', filepath.name) + if match: + step_name = match.group(2).replace('_', ' ').title() + return int(match.group(1)), step_name + + # Fallback pattern for the first input image like '..._input_linear_sRGB.jpg' + match_input = re.search(r'_input_([a-zA-Z0-9_]+?)_sRGB\.', filepath.name) + if match_input: + step_name = f"Input {match_input.group(1).replace('_', ' ').title()}" + return 0, step_name + + # If no pattern matches, return a generic name + return 999, filepath.stem.replace('_', ' ').title() + +def create_arrow(width: int, height: int, color: str) -> Image.Image: + """Creates a right-pointing arrow image with a transparent background.""" + arrow_img = Image.new("RGBA", (width, height), (0, 0, 0, 0)) + draw = ImageDraw.Draw(arrow_img) + + shaft_width = width * 0.7 + rect_start_y = (height // 2) - (height // 10) + rect_height = max(1, height // 5) + + draw.rectangle([(0, rect_start_y), (shaft_width, rect_start_y + rect_height)], fill=color) + draw.polygon([(shaft_width, 0), (width, height // 2), (shaft_width, height)], fill=color) + + return arrow_img + +def main(): + parser = argparse.ArgumentParser( + description="Create a 
visual pipeline of image processing steps with diffs.", + formatter_class=argparse.RawTextHelpFormatter + ) + parser.add_argument("input_dir", type=str, help="Directory containing the input and diff images.") + parser.add_argument("output_file", type=str, help="Path for the final combined image.") + parser.add_argument( + "--scale", type=float, default=0.25, + help="Scale factor for the main pipeline images (e.g., 0.25 for 25%%). Default is 0.25." + ) + parser.add_argument( + "--diff-scale", type=float, default=0.15, + help="Scale factor for the smaller diff images. Default is 0.15." + ) + args = parser.parse_args() + + input_path = Path(args.input_dir) + if not input_path.is_dir(): + print(f"Error: Input directory '{args.input_dir}' not found.") + return + + # 1. Find and sort all images + # --- THIS IS THE FIX --- + # Use a more general glob to find all .jpg files for the pipeline + pipeline_images_raw = list(input_path.glob("*.jpg")) + diff_images_raw = list(input_path.glob("diff_*_RGB.png")) + + if not pipeline_images_raw: + print("Error: No pipeline images (*.jpg) found in the directory.") + return + + # Sort files alphabetically by their full name, which mimics 'ls -1' behavior. + pipeline_images_sorted = sorted(pipeline_images_raw) + diff_images_sorted = sorted(diff_images_raw) + + print("Found and sorted the following pipeline images:") + for p in pipeline_images_sorted: + print(f" - {p.name}") + + # 2. Prepare images and assets + with Image.open(pipeline_images_sorted[0]) as img: + orig_w, orig_h = img.size + + img_w, img_h = int(orig_w * args.scale), int(orig_h * args.scale) + diff_w, diff_h = int(orig_w * args.diff_scale), int(orig_h * args.scale) + + pipeline_images = [Image.open(p).resize((img_w, img_h), Image.Resampling.LANCZOS) for p in pipeline_images_sorted] + diff_images = [Image.open(p).resize((diff_w, diff_h), Image.Resampling.LANCZOS) for p in diff_images_sorted] + + arrow_w, arrow_h = int(img_w * ARROW_WIDTH_RATIO), int(img_h * ARROW_HEIGHT_RATIO) + arrow = create_arrow(arrow_w, arrow_h, ARROW_COLOR) + + try: + font = ImageFont.truetype("arial.ttf", FONT_SIZE) + except IOError: + print("Arial font not found, using default font.") + font = ImageFont.load_default() + + # 3. Calculate canvas size + num_steps = len(pipeline_images) + gap_width = max(arrow_w, diff_w) + PADDING + total_width = (num_steps * img_w) + ((num_steps - 1) * gap_width) + (2 * PADDING) + total_height = PADDING + FONT_SIZE + PADDING + img_h + PADDING + diff_h + PADDING + + # 4. Create the final canvas and draw everything + canvas = Image.new("RGB", (total_width, total_height), BACKGROUND_COLOR) + draw = ImageDraw.Draw(canvas) + + y_text = PADDING + y_pipeline = y_text + FONT_SIZE + PADDING + y_arrow = y_pipeline + (img_h // 2) - (arrow_h // 2) + y_diff = y_pipeline + img_h + PADDING + current_x = PADDING + + for i, p_img in enumerate(pipeline_images): + canvas.paste(p_img, (current_x, y_pipeline)) + + _, step_name = parse_filename(pipeline_images_sorted[i]) + if step_name: + text_bbox = draw.textbbox((0, 0), step_name, font=font) + text_w = text_bbox[2] - text_bbox[0] + draw.text((current_x + (img_w - text_w) // 2, y_text), step_name, font=font, fill=FONT_COLOR) + + if i < num_steps - 1: + gap_start_x = current_x + img_w + + arrow_x = gap_start_x + (gap_width - arrow_w) // 2 + canvas.paste(arrow, (arrow_x, y_arrow), mask=arrow) + + if i < len(diff_images): + diff_x = gap_start_x + (gap_width - diff_w) // 2 + canvas.paste(diff_images[i], (diff_x, y_diff)) + + current_x += img_w + gap_width + + # 5. 
Save the final image + canvas.save(args.output_file, quality=95) + print(f"\nPipeline image successfully created at '{args.output_file}'") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/filmcolor b/filmcolor index 8116b1f..1151b20 100755 --- a/filmcolor +++ b/filmcolor @@ -7,27 +7,39 @@ # "imageio", # "rawpy", # "colour-science", +# "torch", +# "warp-lang", # ] +# [tool.uv.sources] +# torch = [{ index = "pytorch-cu128" }] +# [[tool.uv.index]] +# name = "pytorch-cu128" +# url = "https://download.pytorch.org/whl/cu128" +# explicit = true # /// import argparse -import csv -import numpy as np -import imageio.v3 as iio -from scipy.interpolate import interp1d -from scipy.ndimage import ( - gaussian_filter, - gaussian_filter1d, - map_coordinates, -) # For LUTs -import rawpy +import json +import math import os import sys -import colour -from colour.colorimetry import SDS_ILLUMINANTS -import json -from typing import Optional, List +from datetime import datetime from pathlib import Path +from typing import List, Optional + +import colour +import imageio.v3 as iio +import numpy as np +import rawpy +import torch +import torch.nn.functional as F +import warp as wp +from colour.colorimetry import SDS_ILLUMINANTS +from scipy.integrate import quad +from scipy.interpolate import interp1d +from scipy.ndimage import (gaussian_filter, gaussian_filter1d, # For LUTs + map_coordinates) +from scipy.signal.windows import gaussian # For creating Gaussian kernel # --- Configuration --- EPSILON = 1e-10 @@ -36,8 +48,80 @@ EPSILON = 1e-10 # 17 is faster to generate, 65 is more accurate but much slower to generate. LUT_SIZE = 65 +GLOBAL_DEBUG = False + + +# --- Global variables to ensure all debug images for a single run go to the same folder --- +# This creates a unique, timestamped directory for each script execution. +RUN_TIMESTAMP = datetime.now().strftime("%Y-%m-%d_%H%M%S") +DEBUG_OUTPUT_DIR = Path(f"debug_outputs/{RUN_TIMESTAMP}") + + +def save_debug_image(image: np.ndarray, tag: str, output_format: str = "jpeg"): + """ + Saves a debug image at any point in the processing pipeline. + + The function takes a NumPy array (assumed to be float data in the [0.0, 1.0] range), + a descriptive tag, and an optional format. It saves the image to a timestamped + sub-directory within a `debug_outputs` folder. + + Args: + image (np.ndarray): The image data to save. Expected to be a floating-point + array with values scaled between 0.0 and 1.0. + tag (str): A descriptive name for this processing stage (e.g., + "after_exposure_lut", "final_linear_image"). This will be + part of the filename. + output_format (str, optional): The desired output format. Can be 'jpeg' (or 'jpg') + for 8-bit JPEG, or 'tiff' (or 'tif') for 16-bit + TIFF. Defaults to 'jpeg'. + """ + global GLOBAL_DEBUG, DEBUG_OUTPUT_DIR + if not GLOBAL_DEBUG: + # If global debug is off, do nothing + return + try: + # 1. Ensure the debug directory for this run exists. + DEBUG_OUTPUT_DIR.mkdir(parents=True, exist_ok=True) + + # 2. Sanitize the tag to create a safe filename. + # Removes invalid characters and replaces spaces with underscores. + safe_tag = ( + "".join(c for c in tag if c.isalnum() or c in (" ", "_", "-")) + .rstrip() + .replace(" ", "_") + ) + + # 3. Create a unique filename with a precise timestamp. + image_timestamp = datetime.now().strftime("%H%M%S_%f") + + # 4. Determine file extension and prepare the final image data. 
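+        # (For reference: with the scaling below, a float value of 0.5 becomes
+        # ~127 in the 8-bit JPEG path and ~32767 in the 16-bit TIFF path.)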
+ # Clip the float image data to the [0.0, 1.0] range to prevent wrap-around + # errors or artifacts when converting to integer types. + clipped_image = np.clip(image, 0.0, 1.0) + + if "tif" in output_format.lower(): + # Convert to 16-bit for TIFF + output_image = (clipped_image * 65535.0).astype(np.uint16) + extension = "tiff" + else: + # Convert to 8-bit for JPEG + output_image = (clipped_image * 255.0).astype(np.uint8) + extension = "jpg" + + filename = f"{image_timestamp}_{safe_tag}.{extension}" + filepath = DEBUG_OUTPUT_DIR / filename + + # 5. Save the image and print a confirmation message. + iio.imwrite(filepath, output_image) + print(f"✅ DEBUG: Saved '{tag}' to '{filepath}'") + + except Exception as e: + # If anything goes wrong, print a warning but don't crash the main script. + print( + f"⚠️ DEBUG WARNING: Could not save debug image for tag '{tag}'.\n Reason: {e}" + ) + -# --- Data Structures (unchanged) --- class Info: name: str description: str @@ -206,6 +290,53 @@ class FilmDatasheet: self.info, self.processing, self.properties = info, processing, properties +### NEW: GPU-accelerated separable Gaussian blur helper +def _gpu_separable_gaussian_blur( + image_tensor: torch.Tensor, sigma: float, device: str = "cpu" +) -> torch.Tensor: + """ + Applies a fast, separable Gaussian blur to a tensor on a specified device. + + Args: + image_tensor (torch.Tensor): The input image tensor, shape (B, C, H, W). + sigma (float): The standard deviation of the Gaussian kernel. + device (str): The device to run on ('cpu', 'cuda', 'mps'). + + Returns: + torch.Tensor: The blurred image tensor. + """ + kernel_radius = int(math.ceil(3 * sigma)) + kernel_size = 2 * kernel_radius + 1 + + # Create a 1D Gaussian kernel + x = torch.arange( + -kernel_radius, kernel_radius + 1, dtype=torch.float32, device=device + ) + kernel_1d = torch.exp(-0.5 * (x / sigma) ** 2) + kernel_1d /= kernel_1d.sum() + + # Get tensor shape + B, C, H, W = image_tensor.shape + + # Prepare kernels for 2D convolution + # To blur horizontally, the kernel shape is (C, 1, 1, kernel_size) + kernel_h = kernel_1d.view(1, 1, 1, kernel_size).repeat(C, 1, 1, 1) + + # To blur vertically, the kernel shape is (C, 1, kernel_size, 1) + kernel_v = kernel_1d.view(1, 1, kernel_size, 1).repeat(C, 1, 1, 1) + + # Apply horizontal blur + # padding='same' requires PyTorch 1.9+. For older versions, calculate padding manually. + padding_h = (kernel_size // 2, 0) + blurred_tensor = F.conv2d(image_tensor, kernel_h, padding=padding_h, groups=C) + + # Apply vertical blur + padding_v = (0, kernel_size // 2) + blurred_tensor = F.conv2d(blurred_tensor, kernel_v, padding=padding_v, groups=C) + + return blurred_tensor + + # --- Datasheet Parsing (unchanged) --- def parse_datasheet_json(json_filepath) -> FilmDatasheet | None: # This function remains identical to your last version @@ -294,31 +425,52 @@ def parse_datasheet_json(json_filepath) -> FilmDatasheet | None: ### PERFORMANCE ### -def apply_3d_lut(image: np.ndarray, lut: np.ndarray) -> np.ndarray: +def apply_3d_lut(image: np.ndarray, lut: np.ndarray, device="cpu") -> np.ndarray: """ - Applies a 3D LUT to an image using trilinear interpolation. + Applies a 3D LUT to an image using PyTorch's grid_sample. Args: - image: Input image, shape (H, W, 3), values expected in [0, 1]. + image: Input image, shape (H, W, 3), values in [0, 1]. lut: 3D LUT, shape (N, N, N, 3). + device: 'cpu' or 'cuda' for GPU acceleration. Returns: Output image, shape (H, W, 3). 
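+
+    Note (an assumption worth verifying): F.grid_sample treats grid[..., 0] as
+    the x coordinate, which samples the *last* spatial axis of the permuted
+    LUT. The LUT table's channel/axis ordering must match that convention for
+    the red, green, and blue coordinates to index the intended axes.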
""" - lut_dim = lut.shape[0] + # Ensure data is float32, which is standard for torch + if image.dtype != np.float32: + image = image.astype(np.float32) + if lut.dtype != np.float32: + lut = lut.astype(np.float32) - # Scale image coordinates to LUT indices - scaled_coords = image * (lut_dim - 1) + # Convert to torch tensors and move to the target device + image_torch = torch.from_numpy(image).to(device) + lut_torch = torch.from_numpy(lut).to(device) - # map_coordinates requires coordinates in shape (3, H, W) - coords = np.transpose(scaled_coords, (2, 0, 1)) + # Prepare image coordinates for grid_sample + # grid_sample expects coordinates in the range [-1, 1] + # The image is used as the grid of sampling coordinates + # We add a batch and depth dimension to match the 5D input requirement + # Shape: (H, W, 3) -> (1, 1, H, W, 3) + grid = image_torch * 2 - 1 # Scale from [0, 1] to [-1, 1] + grid = grid.unsqueeze(0).unsqueeze(0) # Add batch and depth dims - # Interpolate each output channel - out_r = map_coordinates(lut[..., 0], coords, order=1, mode="nearest") - out_g = map_coordinates(lut[..., 1], coords, order=1, mode="nearest") - out_b = map_coordinates(lut[..., 2], coords, order=1, mode="nearest") + # Prepare LUT for grid_sample + # grid_sample expects the input grid to be (N, C, D_in, H_in, W_in) + # Our LUT is (N, N, N, 3), which is (D, H, W, C) + # Permute to (C, D, H, W) -> (3, N, N, N) and add a batch dimension + # Shape: (N, N, N, 3) -> (1, 3, N, N, N) + lut_torch = lut_torch.permute(3, 0, 1, 2).unsqueeze(0) - return np.stack([out_r, out_g, out_b], axis=-1) + # Apply the LUT + # align_corners=True ensures that the corners of the LUT cube map to -1 and 1 + result_torch = F.grid_sample(lut_torch, grid, mode="bilinear", align_corners=True) + + # Reshape the output and convert back to numpy + # Shape: (1, 3, 1, H, W) -> (H, W, 3) + result_numpy = result_torch.squeeze().permute(1, 2, 0).cpu().numpy() + + return result_numpy ### PERFORMANCE ### @@ -484,7 +636,7 @@ def apply_hd_curves( y_points, kind="linear", bounds_error=False, - fill_value=(y_points[0], y_points[-1]), # type: ignore + fill_value=(y_points[0], y_points[-1]), # type: ignore ) density_rgb[..., i] = np.maximum( interp_func(np.clip(log_exposure_adjusted, min_logE, max_logE)) @@ -536,19 +688,21 @@ def white_balance_to_d65(image_linear_srgb: np.ndarray) -> np.ndarray: # However, if the image has a color cast, its *actual* illuminant is not D65. # We are adapting from the *estimated* illuminant back to the *ideal* D65. srgb_cs = colour.models.RGB_COLOURSPACE_sRGB - + # The target illuminant is D65. We get its XYZ value from colour-science. - target_illuminant_xyz = colour.CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D65'] + target_illuminant_xyz = colour.CCS_ILLUMINANTS[ + "CIE 1931 2 Degree Standard Observer" + ]["D65"] # Convert our estimated RGB illuminant to XYZ. # source_illuminant_rgb are assumed to be in the sRGB colourspace. # srgb_cs is the sRGB colourspace definition, which includes its D65 whitepoint and conversion matrix. source_illuminant_xyz = colour.RGB_to_XYZ( source_illuminant_rgb, # RGB values to convert - srgb_cs, # Definition of the RGB colourspace these values are in (sRGB) - srgb_cs.whitepoint # CIE XYZ of the illuminant for the sRGB colourspace (D65) - # This ensures conversion without further chromatic adaptation at this step, - # as the input RGB values are already adapted to srgb_cs.whitepoint. 
+ srgb_cs, # Definition of the RGB colourspace these values are in (sRGB) + srgb_cs.whitepoint, # CIE XYZ of the illuminant for the sRGB colourspace (D65) + # This ensures conversion without further chromatic adaptation at this step, + # as the input RGB values are already adapted to srgb_cs.whitepoint. ) # 3. Perform the chromatic adaptation. @@ -557,8 +711,8 @@ def white_balance_to_d65(image_linear_srgb: np.ndarray) -> np.ndarray: image_linear_srgb, source_illuminant_xyz, target_illuminant_xyz, - method='Von Kries', - transform='CAT02' # CAT02 is a well-regarded choice + method="Von Kries", + transform="CAT02", # CAT02 is a well-regarded choice ) # 4. Clip to ensure values remain valid. @@ -661,7 +815,329 @@ def calculate_film_log_exposure( return np.log10(film_exposure_values + EPSILON) + log_shift -# --- Main Execution --- +def apply_spatial_effects_new( + image: np.ndarray, + film_format_mm: int, + interlayerData: Interlayer, + halationData: Halation, + image_width_px: int, + device: str = "cpu", + halation_threshold: float = 0.8, +) -> np.ndarray: + """ + Applies realistic, performant spatial effects (interlayer diffusion and halation). + + Args: + image (np.ndarray): Input linear image array (H, W, 3). + film_format_mm (int): Width of the film format in mm. + interlayerData (Interlayer): Object with interlayer diffusion data. + halationData (Halation): Object with halation strength and size data. + image_width_px (int): Width of the input image in pixels. + device (str): Device for torch operations ('cpu', 'cuda', 'mps'). + halation_threshold (float): Linear brightness value above which halation occurs. + + Returns: + np.ndarray: Image with spatial effects applied. + """ + # 1. Setup: Convert numpy image to a torch tensor for GPU processing + # Tensor shape convention is (Batch, Channels, Height, Width) + image_tensor = ( + torch.from_numpy(image.astype(np.float32)) + .permute(2, 0, 1) + .unsqueeze(0) + .to(device) + ) + + # 2. Interlayer Diffusion: A subtle, overall blur simulating light scatter. + sigma_pixels_diffusion = um_to_pixels( + interlayerData.diffusion_um, image_width_px, film_format_mm + ) + if sigma_pixels_diffusion > EPSILON: + diffused_tensor = _gpu_separable_gaussian_blur( + image_tensor, sigma_pixels_diffusion, device + ) + else: + diffused_tensor = image_tensor + + # 3. Halation: A more physically-based model + # a. Create a "halation source" mask from the brightest parts of the image. + luminance = torch.einsum( + "bchw, c -> bhw", + [diffused_tensor, torch.tensor([0.2126, 0.7152, 0.0722]).to(device)], + ).unsqueeze(1) + + # Apply threshold: only bright areas contribute to the glow. + halation_source = torch.relu(luminance - halation_threshold) + + # b. Create the colored glow by blurring the source mask with per-channel settings. + halation_strengths = torch.tensor( + [halationData.strength.r, halationData.strength.g, halationData.strength.b], + device=device, + ).view(1, 3, 1, 1) + + halation_sizes_px = [ + um_to_pixels(halationData.size_um.r, image_width_px, film_format_mm), + um_to_pixels(halationData.size_um.g, image_width_px, film_format_mm), + um_to_pixels(halationData.size_um.b, image_width_px, film_format_mm), + ] + + # Use a downsampling-upsampling pyramid for a very fast, high-quality large blur. + # This is much faster than a single large convolution. 
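+    # (Sketch of the math: after i rounds of 2x max-pooling, the fixed
+    # sigma=2.0 blur at that level acts roughly like sigma = 2.0 * 2**i at
+    # full resolution, so summing the upsampled levels approximates one very
+    # wide Gaussian at a fraction of the cost of a single large convolution.)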
+    halation_glow = torch.zeros_like(diffused_tensor)
+
+    # Initial source for the pyramid
+    mip_source = halation_source
+
+    for i in range(4):  # 4 levels of downsampling for a wide, soft glow
+        # Blur the current mip level
+        mip_blurred = _gpu_separable_gaussian_blur(
+            mip_source, 2.0, device
+        )  # Small blur at each level
+
+        # Upsample to full size to be added to the final glow
+        # Note: 'bilinear' upsampling ensures a smooth result
+        upsampled_glow = F.interpolate(
+            mip_blurred,
+            size=diffused_tensor.shape[2:],
+            mode="bilinear",
+            align_corners=False,
+        )
+
+        # Add this layer's contribution to the total glow
+        halation_glow += upsampled_glow
+
+        # Downsample for the next iteration, if not the last level
+        if i < 3:
+            mip_source = F.max_pool2d(mip_source, kernel_size=2, stride=2)
+
+    # c. Apply color tint and strength, then add the final glow to the image
+    # The glow is tinted by the halation strength colors and added to the diffused image
+    final_image_tensor = diffused_tensor + (halation_glow * halation_strengths)
+
+    # 4. Finalize: Clip values and convert back to a numpy array
+    final_image_tensor = torch.clamp(final_image_tensor, 0.0, 1.0)
+    result_numpy = final_image_tensor.squeeze(0).permute(1, 2, 0).cpu().numpy()
+
+    return result_numpy
+
+
+def calculate_and_apply_exposure_correction(final_image_to_save):
+    patch_ratio = 0.8
+
+    # --- Target Patch Extraction ---
+    h, w, _ = final_image_to_save.shape
+    patch_h = int(h * patch_ratio)
+    patch_w = int(w * patch_ratio)
+
+    # Calculate top-left corner of the central patch
+    start_h = (h - patch_h) // 2
+    start_w = (w - patch_w) // 2
+
+    # Extract the patch using numpy slicing
+    patch = final_image_to_save[
+        start_h : start_h + patch_h, start_w : start_w + patch_w
+    ]
+
+    # --- Colorimetric Measurement of the Patch ---
+    # Calculate the mean sRGB value of the patch
+    avg_rgb_patch = np.mean(patch, axis=(0, 1))
+    print(f" - Average linear sRGB of patch: {np.round(avg_rgb_patch, 4)}")
+
+    # Define our target: In linear space, middle gray is typically around 0.18 (18%)
+    # This corresponds to L* ≈ 49.5 in CIELAB space (very close to 50)
+    target_luminance_Y = 0.18  # Standard photographic middle gray
+
+    # For reference, calculate what L* value this corresponds to
+    target_lab_L = colour.XYZ_to_Lab(
+        np.array([target_luminance_Y, target_luminance_Y, target_luminance_Y])
+    )[0]
+    print(f" - Target middle gray L*: {target_lab_L:.1f}")
+
+    # --- Calculating the Scaling Factor ---
+    # Convert the average patch sRGB to CIE XYZ
+    input_xyz = colour.sRGB_to_XYZ(avg_rgb_patch)
+    input_luminance_Y = input_xyz[1]
+
+    print(f" - Input patch luminance (Y): {input_luminance_Y:.4f}")
+    print(f" - Target middle gray luminance (Y): {target_luminance_Y:.4f}")
+
+    # The scale factor is the ratio of target luminance to input luminance
+    # Avoid division by zero for black patches
+    if input_luminance_Y < EPSILON:
+        scale_factor = 1.0  # Cannot correct a black patch, so do nothing
+    else:
+        scale_factor = target_luminance_Y / input_luminance_Y
+
+    print(f" - Calculated exposure scale factor: {scale_factor:.4f}")
+    ev_change = np.log2(scale_factor)
+    print(f" - Equivalent EV change: {ev_change:+.2f} EV")
+    # Clamp scale_factor to an EV change of +/- 1.5
+    if ev_change < -1.5:
+        scale_factor = 2**-1.5
+    elif ev_change > 1.5:
+        scale_factor = 2**1.5
+    ev_change = np.log2(scale_factor)
+    print(
+        f" - Clamped exposure scale factor: {scale_factor:.4f} (EV change: {ev_change:+.2f})"
+    )
+
+    # --- Applying the Correction ---
+    # Apply the calculated scale factor to the
entire image + final_image_to_save = final_image_to_save * scale_factor + + # Clip the result to the valid [0.0, 1.0] range + final_image_to_save = np.clip(final_image_to_save, 0.0, 1.0) + return final_image_to_save + + +def apply_shades_of_grey_wb(final_image_to_save): + print("Applying Shades of Gray white balance...") + + # Parameters for the white balance algorithm + p = 6 # Minkowski norm parameter (p=1 is Gray World, p=∞ is White Patch) + clip_percentile = 5 # Percentage of pixels to clip (for robustness) + epsilon = 1e-10 # Small value to avoid division by zero + + # Helper functions for linearization + def _linearize_srgb(x): + # Convert from sRGB to linear RGB + return np.where(x <= 0.04045, x / 12.92, ((x + 0.055) / 1.055) ** 2.4) + + def _delinearize_srgb(x): + # Convert from linear RGB to sRGB + return np.where(x <= 0.0031308, 12.92 * x, 1.055 * (x ** (1 / 2.4)) - 0.055) + + # Work with a copy of the image + + img_float = final_image_to_save.copy() + + # Extract RGB channels + r = img_float[..., 0] + g = img_float[..., 1] + b = img_float[..., 2] + + # Handle clipping of dark and bright pixels for robustness + if clip_percentile > 0: + lower = np.percentile(img_float, clip_percentile) + upper = np.percentile(img_float, 100 - clip_percentile) + img_float = np.clip(img_float, lower, upper) + r = img_float[..., 0] + g = img_float[..., 1] + b = img_float[..., 2] + + # Calculate illuminant estimate using the Minkowski p-norm + if p == float("inf"): + # White Patch (max RGB) case + illum_r = np.max(r) + epsilon + illum_g = np.max(g) + epsilon + illum_b = np.max(b) + epsilon + else: + # Standard Minkowski norm + illum_r = np.power(np.mean(np.power(r, p)), 1 / p) + epsilon + illum_g = np.power(np.mean(np.power(g, p)), 1 / p) + epsilon + illum_b = np.power(np.mean(np.power(b, p)), 1 / p) + epsilon + + # The illuminant is the estimated color of the light source + illuminant = np.array([illum_r, illum_g, illum_b]) + + # The "gray" value is typically the average of the illuminant channels + gray_val = np.mean(illuminant) + + # Calculate scaling factors + scale_r = gray_val / illum_r + scale_g = gray_val / illum_g + scale_b = gray_val / illum_b + + # Apply scaling factors to the original (non-clipped) image + r_orig = final_image_to_save[..., 0] + g_orig = final_image_to_save[..., 1] + b_orig = final_image_to_save[..., 2] + + # Apply the white balance correction + balanced_r = r_orig * scale_r + balanced_g = g_orig * scale_g + balanced_b = b_orig * scale_b + + # Merge channels + balanced_img = np.stack([balanced_r, balanced_g, balanced_b], axis=-1) + + # Clip to valid range + final_image_to_save = np.clip(balanced_img, 0.0, 1.0) + return final_image_to_save + + +def apply_darktable_color_calibration(final_image_to_save): + print("Applying DarkTable-style Color Calibration white balance...") + + # --- Illuminant Estimation --- + # Get original shape and prepare for sampling + height, width, _ = final_image_to_save.shape + + # Sample the central 80% of the image area to estimate the illuminant + # This is more robust than using the whole image, which might have black borders etc. 
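+    # (Keeping 80% of the image *area* means scaling each side by
+    # sqrt(0.8) ~= 0.894, hence the square root below.)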
+ area_ratio = 0.8 + scale = np.sqrt(area_ratio) + sample_width = int(width * scale) + sample_height = int(height * scale) + x_offset = (width - sample_width) // 2 + y_offset = (height - sample_height) // 2 + + # Extract the central patch for sampling + patch = final_image_to_save[ + y_offset : y_offset + sample_height, x_offset : x_offset + sample_width + ] + + # Estimate the source illuminant using the Gray World assumption on the patch + # The average color of the scene is assumed to be the illuminant color. + source_rgb_avg = np.mean(patch, axis=(0, 1)) + + # Avoid issues with pure black patches + if np.all(source_rgb_avg < EPSILON): + print(" - Patch is black, skipping white balance.") + return final_image_to_save + + # Convert the average RGB value to CIE XYZ. This represents our source illuminant. + # We use the sRGB colorspace definition, which assumes a D65 reference white for the RGB values. + source_illuminant_xyz = colour.RGB_to_XYZ( + source_rgb_avg, + colour.models.RGB_COLOURSPACE_sRGB, + colour.models.RGB_COLOURSPACE_sRGB.whitepoint, + ) + + # --- Chromatic Adaptation --- + # Define the target illuminant. For sRGB output, this should be D65. + # Using D65 ensures that neutral colors in the scene are mapped to neutral + # colors in the final sRGB image. + target_illuminant_xyz = colour.CCS_ILLUMINANTS[ + "CIE 1931 2 Degree Standard Observer" + ]["D65"] + + # Ensure target_illuminant_xyz has 3 components (sometimes returns only x,y) + if len(target_illuminant_xyz) == 2: + # Convert xyY to XYZ assuming Y=1 + x, y = target_illuminant_xyz + X = x / y + Y = 1.0 + Z = (1 - x - y) / y + target_illuminant_xyz = np.array([X, Y, Z]) + + print(f" - Source Illuminant (XYZ): {np.round(source_illuminant_xyz, 3)}") + print(f" - Target Illuminant (XYZ): {np.round(target_illuminant_xyz, 3)}") + + # Apply chromatic adaptation to the entire image. + # This transforms the image colors as if the scene was lit by the target illuminant. + # CAT16 is a modern and accurate Chromatic Adaptation Transform. + final_image_to_save = colour.adaptation.chromatic_adaptation( + final_image_to_save, + source_illuminant_xyz, # Source illuminant XYZ + target_illuminant_xyz, # Target illuminant (D65) + method="Von Kries", + transform="CAT16", + ) + + # Clip to valid range and return + final_image_to_save = np.clip(final_image_to_save, 0.0, 1.0) + return final_image_to_save def main(): @@ -675,10 +1151,58 @@ def main(): parser.add_argument("datasheet_json", help="Path to the film datasheet JSON file.") parser.add_argument("output_image", help="Path to save the output emulated image.") parser.add_argument( - "--no-cache", action="store_true", help="Force regeneration of LUTs and do not save them." + "--no-cache", + action="store_true", + help="Force regeneration of LUTs and do not save them.", ) parser.add_argument( - "--force-d65", action="store_true", help="Force white balance when loading RAW files." + "--force-d65", + action="store_true", + help="Force white balance when loading RAW files.", + ) + # --- Modified argument for border --- + parser.add_argument( + "--border", + action="store_true", + help="Add a 10%% border with the film base color and stock info.", + ) + parser.add_argument( + "--gpu", action="store_true", help="Use GPU for LUT processing if available." 
+ ) + parser.add_argument( + "--print-film-base-color", + help="Outputs the film base color determined from HD curve as a CIEXYZ value (D65 illuminant).", + action="store_true", + ) + parser.add_argument( + "--perform-negative-correction", + action="store_true", + help="Apply negative film correction based on the datasheet base color.", + ) + parser.add_argument( + "--perform-white-balance", + action="store_true", + help="Apply white balance to the final result. This is only effective when also using --perform-negative-correction.", + ) + parser.add_argument( + "--perform-exposure-correction", + action="store_true", + help="Apply exposure correction to the final result. This is only effective when also using --perform-negative-correction. Exposure correction is applied before white balance (if white balancing).", + ) + parser.add_argument( + "--raw-auto-exposure", + action="store_true", + help="Automatically adjust exposure for RAW images on load.", + ) + parser.add_argument( + "--simulate-grain", + action="store_true", + help="Simulate film grain in the output image.", + ) + parser.add_argument( + "--mono-grain", + action="store_true", + help="Simulate monochrome film grain in the output image.", ) args = parser.parse_args() @@ -689,22 +1213,30 @@ def main(): f"Simulating: {datasheet.info.name} ({datasheet.info.format_mm}mm) (v{datasheet.info.version})\n\t{datasheet.info.description}" ) + lut_device = "cuda" if args.gpu and torch.cuda.is_available() else "cpu" + # Check for Apple ARM support + if lut_device == "cuda" and torch.backends.mps.is_available(): + print("Using Apple MPS backend for GPU acceleration.") + lut_device = "mps" + # --- LUT Generation and Pre-computation --- cache_dir = Path.home() / ".filmsim" # Sanitize datasheet name for use in a filename - safe_name = "".join(c for c in datasheet.info.name if c.isalnum() or c in (' ', '_')).rstrip() - base_filename = f"{safe_name.replace(' ', '_')}_v{datasheet.info.version}_size{LUT_SIZE}" + safe_name = "".join( + c for c in datasheet.info.name if c.isalnum() or c in (" ", "_") + ).rstrip() + base_filename = ( + f"{safe_name.replace(' ', '_')}_v{datasheet.info.version}_size{LUT_SIZE}" + ) exposure_lut_path = cache_dir / f"{base_filename}_exposure.cube" naive_density_lut_path = cache_dir / f"{base_filename}_naive_density.cube" uncoupled_density_lut_path = cache_dir / f"{base_filename}_uncoupled_density.cube" user_consent_to_save = None middle_gray_logE = float(datasheet.properties.calibration.middle_gray_logE) - + exposure_lut: Optional[np.ndarray] = None # Initialize exposure_lut - ### PERFORMANCE ### - # Try to load exposure LUT from cache first if not args.no_cache and exposure_lut_path.exists(): try: print(f"Loading exposure LUT from cache: {exposure_lut_path}") @@ -714,10 +1246,14 @@ def main(): print(f"Successfully loaded exposure LUT from {exposure_lut_path}") else: # Log a warning and fall through to regeneration - print(f"Warning: Cached exposure LUT {exposure_lut_path} is not a LUT3D (type: {type(loaded_obj)}). Will regenerate.") + print( + f"Warning: Cached exposure LUT {exposure_lut_path} is not a LUT3D (type: {type(loaded_obj)}). Will regenerate." + ) except Exception as e: # Log a warning and fall through to regeneration - print(f"Warning: Error loading exposure LUT from {exposure_lut_path}: {e}. Will regenerate.") + print( + f"Warning: Error loading exposure LUT from {exposure_lut_path}: {e}. Will regenerate." 
+ ) # If LUT wasn't loaded from cache (or cache was disabled, or loading failed), generate it if exposure_lut is None: @@ -728,26 +1264,44 @@ def main(): middle_gray_logE, size=LUT_SIZE, ) - exposure_lut = generated_exposure_lut_table # Assign to the main variable used later + exposure_lut = ( + generated_exposure_lut_table # Assign to the main variable used later + ) # Save the newly generated LUT if caching is enabled if not args.no_cache: - if user_consent_to_save is None: # Ask for consent only once per run if needed - response = input(f"Save generated LUTs to {cache_dir} for future use? [Y/n]: ").lower() - user_consent_to_save = response in ('y', 'yes', '') - + if ( + user_consent_to_save is None + ): # Ask for consent only once per run if needed + response = input( + f"Save generated LUTs to {cache_dir} for future use? [Y/n]: " + ).lower() + user_consent_to_save = response in ("y", "yes", "") + if user_consent_to_save: try: cache_dir.mkdir(parents=True, exist_ok=True) - colour.write_LUT(colour.LUT3D(table=generated_exposure_lut_table, name=f"{base_filename}_exposure"), str(exposure_lut_path)) + colour.write_LUT( + colour.LUT3D( + table=generated_exposure_lut_table, + name=f"{base_filename}_exposure", + ), + str(exposure_lut_path), + ) print(f"Saved exposure LUT to {exposure_lut_path}") except Exception as e: - print(f"Error saving exposure LUT to {exposure_lut_path}: {e}", file=sys.stderr) - + print( + f"Error saving exposure LUT to {exposure_lut_path}: {e}", + file=sys.stderr, + ) + # Ensure exposure_lut is now populated. If not, something went wrong. if exposure_lut is None: # This case should ideally not be reached if create_exposure_lut always returns a valid LUT or raises an error. - print("Critical error: Exposure LUT could not be loaded or generated. Exiting.", file=sys.stderr) + print( + "Critical error: Exposure LUT could not be loaded or generated. Exiting.", + file=sys.stderr, + ) sys.exit(1) # For density LUTs, we need to know the typical range of log exposure values @@ -762,13 +1316,19 @@ def main(): loaded_obj = colour.read_LUT(str(naive_density_lut_path)) if isinstance(loaded_obj, colour.LUT3D): density_lut_naive = loaded_obj.table - print(f"Successfully loaded naive density LUT from {naive_density_lut_path}") + print( + f"Successfully loaded naive density LUT from {naive_density_lut_path}" + ) else: # Log a warning and fall through to regeneration - print(f"Warning: Cached naive density LUT {naive_density_lut_path} is not a LUT3D (type: {type(loaded_obj)}). Will regenerate.") + print( + f"Warning: Cached naive density LUT {naive_density_lut_path} is not a LUT3D (type: {type(loaded_obj)}). Will regenerate." + ) except Exception as e: # Log a warning and fall through to regeneration - print(f"Warning: Error loading naive density LUT from {naive_density_lut_path}: {e}. Will regenerate.") + print( + f"Warning: Error loading naive density LUT from {naive_density_lut_path}: {e}. Will regenerate." + ) if density_lut_naive is None: print("Generating new naive density LUT...") @@ -781,44 +1341,64 @@ def main(): ) if not args.no_cache: if user_consent_to_save is None: - response = input(f"Save generated naive density LUT to {cache_dir} for future use? [Y/n]: ").lower() - user_consent_to_save = response in ('y', 'yes', '') + response = input( + f"Save generated naive density LUT to {cache_dir} for future use? 
[Y/n]: " + ).lower() + user_consent_to_save = response in ("y", "yes", "") if user_consent_to_save: try: cache_dir.mkdir(parents=True, exist_ok=True) colour.write_LUT( - colour.LUT3D(table=density_lut_naive, name=f"{base_filename}_naive_density"), + colour.LUT3D( + table=density_lut_naive, + name=f"{base_filename}_naive_density", + ), str(naive_density_lut_path), ) print(f"Saved naive density LUT to {naive_density_lut_path}") except Exception as e: - print(f"Error saving naive density LUT to {naive_density_lut_path}: {e}", file=sys.stderr) - + print( + f"Error saving naive density LUT to {naive_density_lut_path}: {e}", + file=sys.stderr, + ) + if density_lut_naive is None: # This case should ideally not be reached if create_density_lut always returns a valid LUT or raises an error. - print("Critical error: Naive density LUT could not be loaded or generated. Exiting.", file=sys.stderr) + print( + "Critical error: Naive density LUT could not be loaded or generated. Exiting.", + file=sys.stderr, + ) sys.exit(1) - inhibitor_matrix = compute_inhibitor_matrix( datasheet.properties.couplers.dir_amount_rgb, datasheet.properties.couplers.dir_diffusion_interlayer, ) + density_lut_uncoupled: Optional[np.ndarray] = None # Initialize if not args.no_cache and uncoupled_density_lut_path.exists(): try: - print(f"Loading uncoupled density LUT from cache: {uncoupled_density_lut_path}") + print( + f"Loading uncoupled density LUT from cache: {uncoupled_density_lut_path}" + ) loaded_obj = colour.read_LUT(str(uncoupled_density_lut_path)) if isinstance(loaded_obj, colour.LUT3D): density_lut_uncoupled = loaded_obj.table - print(f"Successfully loaded uncoupled density LUT from {uncoupled_density_lut_path}") + print( + f"Successfully loaded uncoupled density LUT from {uncoupled_density_lut_path}" + ) else: # Log a warning and fall through to regeneration - print(f"Warning: Cached uncoupled density LUT {uncoupled_density_lut_path} is not a LUT3D (type: {type(loaded_obj)}). Will regenerate.") + print( + f"Warning: Cached uncoupled density LUT {uncoupled_density_lut_path} is not a LUT3D (type: {type(loaded_obj)}). Will regenerate." + ) except Exception as e: # Log a warning and fall through to regeneration - print(f"Warning: Error loading uncoupled density LUT from {uncoupled_density_lut_path}: {e}. Will regenerate.") - else: + print( + f"Warning: Error loading uncoupled density LUT from {uncoupled_density_lut_path}: {e}. Will regenerate." + ) + + if density_lut_uncoupled is None: uncoupled_hd_curve = compute_uncoupled_hd_curves( datasheet.properties.curves.hd, inhibitor_matrix ) @@ -831,22 +1411,35 @@ def main(): ) if not args.no_cache: if user_consent_to_save is None: - response = input(f"Save generated uncoupled density LUT to {cache_dir} for future use? [Y/n]: ").lower() - user_consent_to_save = response in ('y', 'yes', '') + response = input( + f"Save generated uncoupled density LUT to {cache_dir} for future use? 
[Y/n]: " + ).lower() + user_consent_to_save = response in ("y", "yes", "") if user_consent_to_save: try: cache_dir.mkdir(parents=True, exist_ok=True) colour.write_LUT( - colour.LUT3D(table=density_lut_uncoupled, name=f"{base_filename}_uncoupled_density"), + colour.LUT3D( + table=density_lut_uncoupled, + name=f"{base_filename}_uncoupled_density", + ), str(uncoupled_density_lut_path), ) - print(f"Saved uncoupled density LUT to {uncoupled_density_lut_path}") + print( + f"Saved uncoupled density LUT to {uncoupled_density_lut_path}" + ) except Exception as e: - print(f"Error saving uncoupled density LUT to {uncoupled_density_lut_path}: {e}", file=sys.stderr) - + print( + f"Error saving uncoupled density LUT to {uncoupled_density_lut_path}: {e}", + file=sys.stderr, + ) + if density_lut_uncoupled is None: # This case should ideally not be reached if create_density_lut always returns a valid LUT or raises an error. - print("Critical error: Uncoupled density LUT could not be loaded or generated. Exiting.", file=sys.stderr) + print( + "Critical error: Uncoupled density LUT could not be loaded or generated. Exiting.", + file=sys.stderr, + ) sys.exit(1) # --- Load and Prepare Input Image --- @@ -855,15 +1448,22 @@ def main(): args.input_image.lower().endswith(ext) for ext in [".dng", ".raw", ".arw"] ): with rawpy.imread(args.input_image) as raw: + wb_params = {} if args.force_d65: - wb_params = {'user_wb': (1.0, 1.0, 1.0, 1.0)} # D65 white balance + # Use a neutral white balance if forcing D65, as post-process will handle it. + # Note: rawpy's 'camera' wb is often the best starting point. Forcing D65 might + # be better handled by a different method if colors seem off. + wb_params = {"user_wb": (1.0, 1.0, 1.0, 1.0)} + else: + wb_params = {"use_camera_wb": True} + image_raw = raw.postprocess( - demosaic_algorithm=rawpy.DemosaicAlgorithm.AHD, # type: ignore + demosaic_algorithm=rawpy.DemosaicAlgorithm.AHD, # type: ignore output_bps=16, - gamma=(1.0, 1.0), - no_auto_bright=True, - output_color=rawpy.ColorSpace.sRGB, # type: ignore - **(wb_params if args.force_d65 else {'user_camera_wb': True}), + gamma=(1, 1), # Linear gamma + no_auto_bright=args.raw_auto_exposure, # type: ignore + output_color=rawpy.ColorSpace.sRGB, # type: ignore + **wb_params, ) image_linear = image_raw.astype(np.float64) / 65535.0 print("RAW file processed to linear floating point directly.") @@ -887,11 +1487,14 @@ def main(): image_linear = image_linear[..., :3] image_linear, image_width_px = np.maximum(image_linear, 0.0), image_linear.shape[1] + save_debug_image(image_linear, "01_input_linear_sRGB") + # --- Pipeline Steps --- ### PERFORMANCE ### # 1. Convert Linear RGB to Log Exposure using the LUT print("Applying exposure LUT...") - log_exposure_rgb = apply_3d_lut(image_linear, exposure_lut) + log_exposure_rgb = apply_3d_lut(image_linear, exposure_lut, device=lut_device) + save_debug_image(log_exposure_rgb, "02_log_exposure_RGB") # 2. Apply DIR Coupler Simulation print("Applying DIR coupler simulation...") @@ -900,7 +1503,9 @@ def main(): (log_exposure_rgb - log_exposure_range[0]) / (log_exposure_range[1] - log_exposure_range[0]), density_lut_naive, + device=lut_device, ) + save_debug_image(naive_density_rgb, "03_naive_density_RGB") # naive_density_rgb = apply_hd_curves(log_exposure_rgb, datasheet.processing, datasheet.properties.curves.hd, middle_gray_logE) # Apply H&D curves to naive density # 2b. 
Use this density to modify log exposure @@ -912,6 +1517,7 @@ def main(): datasheet.info.format_mm, image_width_px, ) + save_debug_image(modified_log_exposure_rgb, "04_modified_log_exposure_RGB") # 3. Apply the *uncoupled* density LUT to the *modified* exposure print("Applying uncoupled density LUT...") @@ -919,27 +1525,683 @@ def main(): norm_log_exposure = (modified_log_exposure_rgb - log_exposure_range[0]) / ( log_exposure_range[1] - log_exposure_range[0] ) - density_rgb = apply_3d_lut(np.clip(norm_log_exposure, 0, 1), density_lut_uncoupled) + density_rgb = apply_3d_lut( + np.clip(norm_log_exposure, 0, 1), density_lut_uncoupled, device=lut_device + ) + save_debug_image(density_rgb, "05_uncoupled_density_RGB") # (Rest of the pipeline is unchanged) print("Converting density to linear transmittance...") linear_transmittance = np.clip(10.0 ** (-density_rgb), 0.0, 1.0) + save_debug_image(linear_transmittance, "06_linear_transmittance_RGB") print("Applying spatial effects (diffusion, halation)...") - linear_post_spatial = apply_spatial_effects( + linear_post_spatial = apply_spatial_effects_new( linear_transmittance, datasheet.info.format_mm, - datasheet.properties.couplers, datasheet.properties.interlayer, datasheet.properties.halation, image_width_px, ) + + save_debug_image(linear_post_spatial, "07_linear_post_spatial_RGB") print("Applying saturation adjustment...") linear_post_saturation = apply_saturation_rgb( linear_post_spatial, datasheet.properties.couplers.saturation_amount ) + save_debug_image(linear_post_saturation, "08_linear_post_saturation_RGB") + + final_image_to_save = linear_post_saturation + + # 1. Get Dmin (minimum density) from the first point of the H&D curve. + # This represents the density of the unexposed film base (the orange mask). + dmin_r = datasheet.properties.curves.hd[0].r + dmin_g = datasheet.properties.curves.hd[0].g + dmin_b = datasheet.properties.curves.hd[0].b + + # 2. The final density is also affected by the balance shifts. + dmin_r += datasheet.processing.balance.r_shift + dmin_g += datasheet.processing.balance.g_shift + dmin_b += datasheet.processing.balance.b_shift + + base_density = np.array([dmin_r, dmin_g, dmin_b]) + + # 3. Convert this final base density to a linear color value. + film_base_color_linear_rgb = 10.0 ** (-base_density) + + if args.border or args.print_film_base_color: + + # 3. Convert this final base density to a linear color value. + base_color_linear = 10.0 ** (-base_density) + if args.print_film_base_color: + # Convert the base color to CIEXYZ using D65 illuminant + base_color_xyz = colour.RGB_to_XYZ( + base_color_linear, + colour.models.RGB_COLOURSPACE_sRGB, + colour.models.RGB_COLOURSPACE_sRGB.whitepoint, + ) + print(f"Film base color (D65): {base_color_xyz} (XYZ)") + + if args.border: + print("Adding film base colored border...") + # 4. Create the new image with a 2.25% border on all sides. + h, w, _ = final_image_to_save.shape + border_h = int(h * 0.0225) + border_w = int(w * 0.0225) + + bordered_image = np.full( + (h + 2 * border_h, w + 2 * border_w, 3), + base_color_linear, + dtype=final_image_to_save.dtype, + ) + + # 5. Place the original rendered image in the center. 
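+            # (The border adds border_h rows above/below and border_w columns
+            # left/right, so the original image occupies the central slice.)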
+        bordered_image[border_h : h + border_h, border_w : w + border_w, :] = (
+            final_image_to_save
+        )
+        try:
+            from PIL import Image, ImageDraw, ImageFont
+
+            # Convert the float image (0.0-1.0) to a uint8 image (0-255) for Pillow
+            bordered_image_uint8 = (np.clip(bordered_image, 0.0, 1.0) * 255).astype(
+                np.uint8
+            )
+            pil_image = Image.fromarray(bordered_image_uint8)
+            draw = ImageDraw.Draw(pil_image)
+
+            # Prepare the text
+            text_to_draw = f"{datasheet.info.name.upper()} {datasheet.properties.calibration.iso}"
+            text_to_repeat = f" {text_to_draw} "
+
+            # Find a suitable monospaced font, with size relative to the border height
+            font_size = max(
+                4, int(border_h * 0.75)
+            )  # At least 4 px; otherwise 75% of the border height
+            font = None
+            font_paths = [
+                "Menlo.ttc",
+                "Consolas.ttf",
+                "DejaVuSansMono.ttf",
+                "cour.ttf",
+            ]
+            for path in font_paths:
+                try:
+                    font = ImageFont.truetype(path, size=font_size)
+                    break
+                except IOError:
+                    continue
+            if font is None:
+                print(
+                    "Warning: A common monospaced font was not found. Using Pillow's default font."
+                )
+                try:
+                    font = ImageFont.load_default(size=font_size)
+                except AttributeError:  # For older Pillow versions
+                    font = ImageFont.load_default()
+
+            # Calculate text size using the more accurate textbbox
+            _, _, text_width, text_height = draw.textbbox(
+                (0, 0), text_to_repeat, font=font
+            )
+
+            # Calculate vertical positions to center text in the border
+            y_top = (border_h - text_height) // 2
+            y_bottom = (h + border_h) + (border_h - text_height) // 2
+
+            # Start drawing with a 2% margin
+            start_x = int(w * 0.02)
+
+            # Repeat text across top and bottom borders
+            for y_pos in [y_top, y_bottom]:
+                x = start_x
+                while x < pil_image.width:
+                    draw.text(
+                        (x, y_pos), text_to_repeat, font=font, fill=(200, 200, 200)
+                    )  # Light gray fill
+                    x += text_width
+
+            # Convert back to a float numpy array for saving. Note that this
+            # round-trips the bordered image through 8 bits, which quantizes it.
+            final_image_to_save = np.array(pil_image).astype(np.float64) / 255.0
+
+            save_debug_image(final_image_to_save, "09_bordered_image_RGB")
+        except ImportError:
+            print(
+                "Warning: Pillow library not found. Skipping text drawing on border.",
+                file=sys.stderr,
+            )
+            final_image_to_save = bordered_image
+        except Exception as e:
+            print(f"An error occurred during text drawing: {e}", file=sys.stderr)
+            final_image_to_save = bordered_image
+
+    # Apply Film Negative Correction if requested
+    if args.perform_negative_correction:
+        print("Applying film negative correction...")
+        print("Film base color:", film_base_color_linear_rgb)
+        masked_removed = final_image_to_save / film_base_color_linear_rgb
+        inverted_image = 1.0 / (masked_removed + EPSILON)  # Avoid division by zero
+        max_val = np.percentile(inverted_image, 99.9)
+        final_image_to_save = np.clip(inverted_image / max_val, 0.0, 1.0)
+        save_debug_image(final_image_to_save, "10_negative_corrected_image_RGB")
+
+    # Apply White Balance Correction
+    DO_SHADES_OF_GRAY = True  # Use the Shades of Gray algorithm for white balance
+    if args.perform_white_balance:
+        if not args.perform_negative_correction:
+            print(
+                "Warning: White balance correction is only effective when using --perform-negative-correction. Ignoring flag."
+ ) + else: + if DO_SHADES_OF_GRAY: + final_image_to_save = apply_shades_of_grey_wb(final_image_to_save) + save_debug_image( + final_image_to_save, "11_shades_of_gray_corrected_image_RGB" + ) + else: + final_image_to_save = apply_darktable_color_calibration( + final_image_to_save + ) + save_debug_image( + final_image_to_save, + "11_darktable_color_calibration_corrected_image_RGB", + ) + + # Apply Exposure Correction + if args.perform_exposure_correction: + print("Applying exposure correction...") + if not args.perform_negative_correction: + print( + "Warning: Exposure correction is only effective when using --perform-negative-correction. Ignoring flag." + ) + else: + # Define the patch ratio for measurement + final_image_to_save = calculate_and_apply_exposure_correction( + final_image_to_save + ) + save_debug_image(final_image_to_save, "12_exposure_corrected_image_RGB") + + # Apply Tone Curve Correction + + # Apply Film Grain + if args.simulate_grain: + print("Simulating film grain...") + # Import warp if available for GPU acceleration + try: + wp_available = True + wp.init() + print("Using NVIDIA Warp for GPU-accelerated film grain simulation") + except ImportError: + wp_available = False + print("Warp not available. Using CPU for film grain (slower)") + + # Get ISO from film datasheet + iso = datasheet.properties.calibration.iso + height, width = final_image_to_save.shape[:2] + + # Function to get grain parameters based on image dimensions and ISO + def get_grain_parameters(width, height, iso): + # --- Baseline Parameters (calibrated for a 24MP image @ ISO 400) --- + PIXELS_BASE = 24_000_000.0 + ISO_BASE = 400.0 + MU_R_BASE = 0.075 + SIGMA_BASE = 0.25 + + # --- Scaling Exponents (Artistically chosen for a natural feel) --- + ISO_EXPONENT_MU = 0.4 + ISO_EXPONENT_SIGMA = 0.3 + + # Clamp ISO to a reasonable range + iso = max(64.0, min(iso, 8000.0)) + + # Calculate the total number of pixels in the actual image + pixels_actual = float(width * height) + + # Calculate the resolution scaler + resolution_scaler = math.sqrt(pixels_actual / PIXELS_BASE) + print( + f"Resolution scaler: {resolution_scaler:.4f} (for {width}x{height} image)" + ) + + # Calculate the ISO scaler + iso_ratio = iso / ISO_BASE + iso_scaler_mu = iso_ratio**ISO_EXPONENT_MU + iso_scaler_sigma = iso_ratio**ISO_EXPONENT_SIGMA + print( + f"ISO scaler: μ = {iso_scaler_mu:.4f}, σ = {iso_scaler_sigma:.4f} (for ISO {iso})" + ) + + # Calculate the final parameters by applying both scalers + final_mu_r = MU_R_BASE * resolution_scaler * iso_scaler_mu + final_sigma = SIGMA_BASE * resolution_scaler * iso_scaler_sigma + + return (final_mu_r, final_sigma) + + if wp_available: + # Define Warp functions and kernels + @wp.func + def w_func(x: float): + if x >= 2.0: + return 0.0 + elif x < 0.0: + return 1.0 + else: + acos_arg = x / 2.0 + if acos_arg > 1.0: + acos_arg = 1.0 + if acos_arg < -1.0: + acos_arg = -1.0 + + sqrt_term_arg = 1.0 - acos_arg * acos_arg + if sqrt_term_arg < 0.0: + sqrt_term_arg = 0.0 + + overlap_area_over_pi = ( + 2.0 * wp.acos(acos_arg) - x * wp.sqrt(sqrt_term_arg) + ) / wp.pi + return overlap_area_over_pi + + @wp.func + def CB_const_radius_unit(u_pixel: float, x: float): + safe_u = wp.min(u_pixel, 0.99999) + if safe_u < 0.0: + safe_u = 0.0 + + one_minus_u = 1.0 - safe_u + if one_minus_u <= 1e-9: + return 0.0 + + wx = w_func(x) + exponent = 2.0 - wx + + term1 = wp.pow(one_minus_u, exponent) + term2 = one_minus_u * one_minus_u + return term1 - term2 + + @wp.kernel + def generate_noise_kernel( + u_image: 
wp.array2d(dtype=float), + variance_lut: wp.array(dtype=float), + noise_out: wp.array2d(dtype=float), + mu_r: float, + sigma_filter: float, + seed: int, + ): + ix, iy = wp.tid() + height = u_image.shape[0] + width = u_image.shape[1] + if ix >= height or iy >= width: + return + + lut_size = variance_lut.shape[0] + u_val = u_image[ix, iy] + + lut_pos = u_val * float(lut_size - 1) + lut_index0 = int(lut_pos) + lut_index0 = wp.min(wp.max(lut_index0, 0), lut_size - 2) + lut_index1 = lut_index0 + 1 + t = lut_pos - float(lut_index0) + if t < 0.0: + t = 0.0 + if t > 1.0: + t = 1.0 + + integral_val = wp.lerp( + variance_lut[lut_index0], variance_lut[lut_index1], t + ) + + var_bp = 0.0 + if sigma_filter > 1e-6 and mu_r > 1e-6: + var_bp = wp.max( + 0.0, + (mu_r * mu_r) + / (2.0 * sigma_filter * sigma_filter) + * integral_val, + ) + + std_dev = wp.sqrt(var_bp) + state = wp.rand_init(seed, ix * width + iy + seed) + noise_sample = wp.randn(state) * std_dev + noise_out[ix, iy] = noise_sample + + @wp.kernel + def convolve_2d_kernel( + input_array: wp.array2d(dtype=float), + kernel: wp.array(dtype=float), + kernel_radius: int, + output_array: wp.array2d(dtype=float), + ): + ix, iy = wp.tid() + height = input_array.shape[0] + width = input_array.shape[1] + if ix >= height or iy >= width: + return + + kernel_dim = 2 * kernel_radius + 1 + accum = float(0.0) + + for ky_offset in range(kernel_dim): + for kx_offset in range(kernel_dim): + k_idx = ky_offset * kernel_dim + kx_offset + weight = kernel[k_idx] + + # Image coordinates to sample from + read_row = ix + (ky_offset - kernel_radius) + read_col = iy + (kx_offset - kernel_radius) + + clamped_row = wp.max(0, wp.min(read_row, height - 1)) + clamped_col = wp.max(0, wp.min(read_col, width - 1)) + + sample_val = input_array[clamped_row, clamped_col] + accum += weight * sample_val + output_array[ix, iy] = accum + + @wp.kernel + def add_rgb_noise_and_clip_kernel( + r_in: wp.array2d(dtype=float), + g_in: wp.array2d(dtype=float), + b_in: wp.array2d(dtype=float), + noise_r: wp.array2d(dtype=float), + noise_g: wp.array2d(dtype=float), + noise_b: wp.array2d(dtype=float), + r_out: wp.array2d(dtype=float), + g_out: wp.array2d(dtype=float), + b_out: wp.array2d(dtype=float), + ): + ix, iy = wp.tid() + + height = r_in.shape[0] + width = r_in.shape[1] + if ix >= height or iy >= width: + return + + r_out[ix, iy] = wp.clamp(r_in[ix, iy] + noise_r[ix, iy], 0.0, 1.0) + g_out[ix, iy] = wp.clamp(g_in[ix, iy] + noise_g[ix, iy], 0.0, 1.0) + b_out[ix, iy] = wp.clamp(b_in[ix, iy] + noise_b[ix, iy], 0.0, 1.0) + + def integrand_variance(x, u_pixel): + if x < 0: + return 0.0 + if x >= 2.0: + return 0.0 + + safe_u = np.clip(u_pixel, 0.0, 0.99999) + one_minus_u = 1.0 - safe_u + if one_minus_u <= 1e-9: + return 0.0 + + acos_arg = x / 2.0 + if acos_arg > 1.0: + acos_arg = 1.0 + if acos_arg < -1.0: + acos_arg = -1.0 + + sqrt_term_arg = 1.0 - acos_arg * acos_arg + if sqrt_term_arg < 0.0: + sqrt_term_arg = 0.0 + + wx = (2.0 * np.arccos(acos_arg) - x * np.sqrt(sqrt_term_arg)) / np.pi + + cb = np.power(one_minus_u, 2.0 - wx) - np.power(one_minus_u, 2.0) + return cb * x + + def precompute_variance_lut(num_u_samples=256): + print(f"Precomputing variance LUT with {num_u_samples+1} entries...") + u_values_for_lut = np.linspace( + 0.0, 1.0, num_u_samples + 1, endpoint=True + ) + lut = np.zeros(num_u_samples + 1, dtype=np.float32) + + for i, u in enumerate(u_values_for_lut): + result, error = quad( + integrand_variance, 0, 2, args=(u,), epsabs=1e-6, limit=100 + ) + if result < 0: + result = 0.0 + lut[i] 
= result + if i % ((num_u_samples + 1) // 10) == 0: + print(f" LUT progress: {i}/{num_u_samples+1}") + print("Variance LUT computed.") + return lut + + def create_gaussian_kernel_2d(sigma, radius): + kernel_size = 2 * radius + 1 + g = gaussian(kernel_size, sigma, sym=True) + kernel_2d = np.outer(g, g) + sum_sq = np.sum(kernel_2d**2) + if sum_sq > 1e-9: + kernel_2d /= np.sqrt(sum_sq) + return kernel_2d.flatten().astype(np.float32) + + # Calculate grain parameters + mu_r, sigma_filter = get_grain_parameters(width, height, iso) + print(f"Film grain parameters: μr = {mu_r}, σ_filter = {sigma_filter}") + + # Use 256 u_samples for LUT + variance_lut_np = precompute_variance_lut(num_u_samples=256) + variance_lut_wp = wp.array(variance_lut_np, dtype=float, device="cuda") + + kernel_radius = max(1, int(np.ceil(3 * sigma_filter))) + kernel_np = create_gaussian_kernel_2d(sigma_filter, kernel_radius) + kernel_wp = wp.array(kernel_np, dtype=float, device="cuda") + print( + f"Using Gaussian filter with sigma={sigma_filter}, radius={kernel_radius}" + ) + + # Prepare image arrays on GPU + img_r = final_image_to_save[:, :, 0] + img_g = final_image_to_save[:, :, 1] + img_b = final_image_to_save[:, :, 2] + + r_original_wp = wp.array(img_r, dtype=float, device="cuda") + g_original_wp = wp.array(img_g, dtype=float, device="cuda") + b_original_wp = wp.array(img_b, dtype=float, device="cuda") + + # Allocate noise arrays + noise_r_unfiltered_wp = wp.empty_like(r_original_wp) + noise_g_unfiltered_wp = wp.empty_like(g_original_wp) + noise_b_unfiltered_wp = wp.empty_like(b_original_wp) + + noise_r_filtered_wp = wp.empty_like(r_original_wp) + noise_g_filtered_wp = wp.empty_like(g_original_wp) + noise_b_filtered_wp = wp.empty_like(b_original_wp) + + # Output arrays + r_output_wp = wp.empty_like(r_original_wp) + g_output_wp = wp.empty_like(g_original_wp) + b_output_wp = wp.empty_like(b_original_wp) + + # Use a random seed based on the ISO value for consistency + seed = args.seed if hasattr(args, "seed") else int(iso) + + # Generate and apply noise for each channel + if args.mono_grain: + # Create luminance image + img_gray_np = 0.299 * img_r + 0.587 * img_g + 0.114 * img_b + u_gray_wp = wp.array(img_gray_np, dtype=float, device="cuda") + noise_image_wp = wp.empty_like(u_gray_wp) + + print("Generating monochromatic noise...") + wp.launch( + kernel=generate_noise_kernel, + dim=(height, width), + inputs=[ + u_gray_wp, + variance_lut_wp, + noise_image_wp, + mu_r, + sigma_filter, + seed, + ], + device="cuda", + ) + + noise_filtered_wp = wp.empty_like(u_gray_wp) + wp.launch( + kernel=convolve_2d_kernel, + dim=(height, width), + inputs=[ + noise_image_wp, + kernel_wp, + kernel_radius, + noise_filtered_wp, + ], + device="cuda", + ) + + # Copy the same noise to all channels + noise_r_filtered_wp.assign(noise_filtered_wp) + noise_g_filtered_wp.assign(noise_filtered_wp) + noise_b_filtered_wp.assign(noise_filtered_wp) + else: + # Process each channel separately + print("Processing R channel...") + wp.launch( + kernel=generate_noise_kernel, + dim=(height, width), + inputs=[ + r_original_wp, + variance_lut_wp, + noise_r_unfiltered_wp, + mu_r, + sigma_filter, + seed, + ], + device="cuda", + ) + wp.launch( + kernel=convolve_2d_kernel, + dim=(height, width), + inputs=[ + noise_r_unfiltered_wp, + kernel_wp, + kernel_radius, + noise_r_filtered_wp, + ], + device="cuda", + ) + + print("Processing G channel...") + wp.launch( + kernel=generate_noise_kernel, + dim=(height, width), + inputs=[ + g_original_wp, + variance_lut_wp, + 
noise_g_unfiltered_wp, + mu_r, + sigma_filter, + seed + 1, + ], + device="cuda", + ) + wp.launch( + kernel=convolve_2d_kernel, + dim=(height, width), + inputs=[ + noise_g_unfiltered_wp, + kernel_wp, + kernel_radius, + noise_g_filtered_wp, + ], + device="cuda", + ) + + print("Processing B channel...") + wp.launch( + kernel=generate_noise_kernel, + dim=(height, width), + inputs=[ + b_original_wp, + variance_lut_wp, + noise_b_unfiltered_wp, + mu_r, + sigma_filter, + seed + 2, + ], + device="cuda", + ) + wp.launch( + kernel=convolve_2d_kernel, + dim=(height, width), + inputs=[ + noise_b_unfiltered_wp, + kernel_wp, + kernel_radius, + noise_b_filtered_wp, + ], + device="cuda", + ) + + # Add the noise to the original image + print("Adding noise to image...") + wp.launch( + kernel=add_rgb_noise_and_clip_kernel, + dim=(height, width), + inputs=[ + r_original_wp, + g_original_wp, + b_original_wp, + noise_r_filtered_wp, + noise_g_filtered_wp, + noise_b_filtered_wp, + r_output_wp, + g_output_wp, + b_output_wp, + ], + device="cuda", + ) + + # Copy results back to CPU + img_with_grain = np.zeros((height, width, 3), dtype=np.float32) + img_with_grain[:, :, 0] = r_output_wp.numpy() + img_with_grain[:, :, 1] = g_output_wp.numpy() + img_with_grain[:, :, 2] = b_output_wp.numpy() + + final_image_to_save = img_with_grain + + else: + # CPU fallback implementation (simplified) + print("Using CPU for film grain simulation (this will be slower)") + print("Consider installing NVIDIA Warp for GPU acceleration") + + # Calculate grain parameters + mu_r, sigma_filter = get_grain_parameters(width, height, iso) + + # Simple noise generation for CPU fallback + rng = np.random.RandomState( + args.seed if hasattr(args, "seed") else int(iso) + ) + + # Generate random noise for each channel + noise_strength = mu_r * 0.15 # Simplified approximation + + if args.mono_grain: + # Generate one noise channel and apply to all + noise = rng.normal(0, noise_strength, (height, width)) + noise = gaussian_filter(noise, sigma=sigma_filter) + + # Apply same noise to all channels + for c in range(3): + final_image_to_save[:, :, c] = np.clip( + final_image_to_save[:, :, c] + noise, 0.0, 1.0 + ) + else: + # Generate separate noise for each channel + for c in range(3): + noise = rng.normal(0, noise_strength, (height, width)) + noise = gaussian_filter(noise, sigma=sigma_filter) + final_image_to_save[:, :, c] = np.clip( + final_image_to_save[:, :, c] + noise, 0.0, 1.0 + ) + + save_debug_image(final_image_to_save, "13_final_image_with_grain_RGB") + print("Converting to output format and saving...") + # For some reason DNG files have the top 64 pixel rows black, so we crop them out. + if args.input_image.lower().endswith((".dng")): + final_image_to_save = final_image_to_save[64:, :, :] + output_image = ( - np.clip(linear_post_saturation, 0.0, 1.0) + np.clip(final_image_to_save, 0.0, 1.0) * (65535.0 if args.output_image.lower().endswith((".tiff", ".tif")) else 255.0) ).astype( np.uint16 if args.output_image.lower().endswith((".tiff", ".tif")) else np.uint8 diff --git a/filmgrain b/filmgrain index db5b038..93225e2 100755 --- a/filmgrain +++ b/filmgrain @@ -19,6 +19,57 @@ import imageio.v3 as iio from scipy.integrate import quad from scipy.signal.windows import gaussian # For creating Gaussian kernel import rawpy +import math + +def get_grain_parameters(width, height, iso): + """ + Calculates mu_r and sigma for the film grain script based on image + dimensions (width, height) and target ISO. 
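
+
+    Example (added note; exact at the calibration point by construction): a
+    6000x4000 (24MP) image at ISO 400 has resolution and ISO scalers of 1.0,
+    so the function returns the baseline values (mu_r, sigma) = (0.15, 0.5).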
+ + Args: + width (int): The width of the source image in pixels. + height (int): The height of the source image in pixels. + iso (float): The target film ISO to simulate (e.g., 100, 400, 3200). + + Returns: + tuple: A tuple containing the calculated (mu_r, sigma) for the script. + """ + # --- Baseline Parameters (calibrated for a 24MP image @ ISO 400) --- + # A 24MP image (e.g., 6000x4000) has 24,000,000 pixels. + PIXELS_BASE = 24_000_000.0 + ISO_BASE = 400.0 + MU_R_BASE = 0.15 + SIGMA_BASE = 0.5 + + # --- Scaling Exponents (Artistically chosen for a natural feel) --- + # The exponent for mu_r is larger than for sigma to ensure that + # grain intensity (related to mu_r²/sigma²) increases with ISO. + ISO_EXPONENT_MU = 0.4 + ISO_EXPONENT_SIGMA = 0.3 + + # Clamp ISO to a reasonable range to avoid extreme/invalid values + iso = max(64.0, min(iso, 8000.0)) + + # 1. Calculate the total number of pixels in the actual image + pixels_actual = float(width * height) + + # 2. Calculate the resolution scaler + # This scales parameters based on the image's linear dimensions (sqrt of area) + # relative to the 24MP baseline. + resolution_scaler = math.sqrt(pixels_actual / PIXELS_BASE) + print(f"Resolution scaler: {resolution_scaler:.4f} (for {width}x{height} image)") + + # 3. Calculate the ISO scaler + iso_ratio = iso / ISO_BASE + iso_scaler_mu = iso_ratio ** ISO_EXPONENT_MU + iso_scaler_sigma = iso_ratio ** ISO_EXPONENT_SIGMA + print(f"ISO scaler: μ = {iso_scaler_mu:.4f}, σ = {iso_scaler_sigma:.4f} (for ISO {iso})") + + # 4. Calculate the final parameters by applying both scalers + final_mu_r = MU_R_BASE * resolution_scaler * iso_scaler_mu + final_sigma = SIGMA_BASE * resolution_scaler * iso_scaler_sigma + + return (final_mu_r, final_sigma) wp.init() @@ -239,7 +290,7 @@ def create_gaussian_kernel_2d(sigma, radius): return kernel_2d.flatten().astype(np.float32) -def render_film_grain(image_path, mu_r, sigma_filter, output_path, seed=42, mono=False): +def render_film_grain(image_path, iso, output_path, seed=42, mono=False): try: if image_path.lower().endswith('.arw') or image_path.lower().endswith('.dng'): # Use rawpy for TIFF images to handle metadata correctly @@ -276,6 +327,7 @@ def render_film_grain(image_path, mu_r, sigma_filter, output_path, seed=42, mono img_np_float = img_np.astype(np.float32) height, width, channels = img_np_float.shape + mu_r, sigma_filter = get_grain_parameters(width, height, iso) print(f"Input image: {width}x{height}x{channels}") print(f"Parameters: μr = {mu_r}, σ_filter = {sigma_filter}") @@ -399,13 +451,10 @@ if __name__ == "__main__": parser.add_argument("input_image", help="Path to the input image (TIFF, PNG, JPG, or RAW (ARW/DNG) format)") parser.add_argument("output_image", help="Path to save the output image (TIFF (16-bit), PNG, JPG format)") parser.add_argument( - "--mu_r", type=float, default=0.1, help="Mean grain radius (relative to pixel size)" - ) - parser.add_argument( - "--sigma", - type=float, - default=0.8, - help="Standard deviation of the Gaussian Filter for noise blurring (sigma_filter).", + "--iso", + type=int, + default=400, + help="Target film ISO to simulate (e.g., 100, 400, 1600).", ) parser.add_argument( "--seed", type=int, default=42, help="Random seed for noise generation" @@ -416,17 +465,7 @@ if __name__ == "__main__": args = parser.parse_args() - if args.mu_r <= 0: - print("Warning: mu_r should be positive. Using default 0.1") - args.mu_r = 0.1 - if args.sigma <= 0: - print("Warning: sigma_filter should be positive. 
Using default 0.8") - args.sigma = 0.8 - if args.sigma < 3 * args.mu_r: - print( - f"Warning: sigma_filter ({args.sigma}) is less than 3*mu_r ({3 * args.mu_r:.2f}). Approximations in the model might be less accurate." - ) render_film_grain( - args.input_image, args.mu_r, args.sigma, args.output_image, args.seed, args.mono + args.input_image, args.iso, args.output_image, args.seed, args.mono ) \ No newline at end of file diff --git a/filmscan b/filmscan index ea35391..39f0bdf 100755 --- a/filmscan +++ b/filmscan @@ -314,8 +314,8 @@ def negadoctor_process(img_aces_negative: np.ndarray, compressed_highlights = soft_clip_param + (1.0 - e_to_gamma) * soft_clip_comp output_pixels = np.where(print_gamma > soft_clip_param, compressed_highlights, print_gamma) - - return np.clip(output_pixels, 0.0, 1.0) # Final clip to 0-1 range + + return np.clip(output_pixels, 0.0, None) # Final clip to 0-Inf range # --- Main Execution --- @@ -371,10 +371,26 @@ if __name__ == "__main__": print("Converting to ACEScg...") # img_linear_srgb = colour.gamma_correct(img_float, 1/2.2, 'ITU-R BT.709') # Approximate sRGB EOTF decoding img_linear_srgb = colour.models.eotf_sRGB(img_float) # More accurate sRGB EOTF decoding - img_acescg = colour.RGB_to_RGB(img_linear_srgb, - colour.models.RGB_COLOURSPACE_sRGB, - colour.models.RGB_COLOURSPACE_ACESCG) - img_acescg = np.clip(img_acescg, 0.0, None) # ACEScg can have values > 1.0 for very bright sources + # Calculate the full transformation matrix from linear sRGB to ACEScg. + # This includes chromatic adaptation from sRGB's D65 whitepoint + # to ACEScg's D60 whitepoint, which is crucial for accuracy. + sRGB_cs = colour.models.RGB_COLOURSPACE_sRGB + ACEScg_cs = colour.models.RGB_COLOURSPACE_ACESCG + + # colour.matrix_RGB_to_RGB computes the combined matrix: M_XYZ_to_ACEScg @ M_CAT @ M_sRGB_to_XYZ + # This matrix is cached by colour-science after the first call for efficiency. + srgb_to_acescg_matrix = colour.matrix_RGB_to_RGB(sRGB_cs, ACEScg_cs) # Shape: (3, 3) + + # Apply the transformation using NumPy's matrix multiplication operator @. + # img_linear_srgb has shape (H, W, 3). + # srgb_to_acescg_matrix.T also has shape (3, 3). + # The @ operator performs (H, W, 3) @ (3, 3) -> (H, W, 3), + # effectively applying the 3x3 matrix to each 3-element RGB vector. + # This is generally highly optimized and avoids explicit reshape calls. + img_acescg = img_linear_srgb @ srgb_to_acescg_matrix.T + + # ACEScg space can legitimately have values outside [0,1] for bright, saturated colors. 
+ img_acescg = np.clip(img_acescg, 0.0, None) print(f"Image in ACEScg: shape: {img_acescg.shape}, min: {img_acescg.min():.4f}, max: {img_acescg.max():.4f}, mean: {img_acescg.mean():.4f}") diff --git a/filmscanv2 b/filmscanv2 new file mode 100755 index 0000000..077f22f --- /dev/null +++ b/filmscanv2 @@ -0,0 +1,466 @@ +#!/usr/bin/env -S uv run --script +# /// script +# dependencies = [ +# "numpy", +# "scipy", +# "Pillow", +# "imageio", +# "rawpy", +# "colour-science", +# ] +# /// +#!/usr/bin/env python + +#!/usr/bin/env python +import argparse +import sys +import numpy as np +import imageio.v2 as iio +import colour +from scipy.signal import convolve2d +from scipy.ndimage import gaussian_filter1d +from scipy.signal import find_peaks, savgol_filter +from scipy.ndimage import sobel + + +# --- Color Space Conversion --- + +def to_acescg(image, input_colorspace='sRGB'): + """Converts an image from a specified colorspace to ACEScg.""" + return colour.RGB_to_RGB(image, colour.models.RGB_COLOURSPACES[input_colorspace], colour.models.RGB_COLOURSPACES['ACEScg']) + +def from_acescg(image, output_colorspace='sRGB'): + """Converts an image from ACEScg to a specified colorspace.""" + return colour.RGB_to_RGB(image, colour.models.RGB_COLOURSPACES['ACEScg'], colour.models.RGB_COLOURSPACES[output_colorspace]) + +# --- Image Processing --- + +def _analyze_profile(profile, prominence, width): + """Helper function to find the most prominent peak in a 1D gradient profile.""" + if profile.size == 0: + return None + # Smooth the profile to reduce noise. Window must be odd and less than profile size. + window_length = min(51, len(profile) // 2 * 2 + 1) + if window_length < 5: # savgol_filter requires window_length > polyorder + smoothed_profile = profile + else: + smoothed_profile = savgol_filter(profile, window_length=window_length, polyorder=2) + # Find all peaks that stand out from the baseline. + peaks, properties = find_peaks(smoothed_profile, prominence=prominence, width=width) + if len(peaks) == 0: + return None + # Return the index of the most prominent peak. + most_prominent_peak_index = np.argmax(properties['prominences']) + return peaks[most_prominent_peak_index] + + +def detect_and_crop_border_gradient(image, border_percent=5, prominence=5.0, min_width=2): + """ + Detects film edges using a directional gradient method, robust to complex borders. + """ + # 1. Convert to grayscale for gradient analysis + luminosity_weights = np.array([0.2126, 0.7152, 0.0722]) + image_gray = np.dot(image, luminosity_weights) + height, width = image_gray.shape + + # 2. Calculate directional gradients once for the entire image + grad_y = sobel(image_gray, axis=0) # For horizontal lines (top, bottom) + grad_x = sobel(image_gray, axis=1) # For vertical lines (left, right) + + coords = {} + search_w = int(width * border_percent / 100) + search_h = int(height * border_percent / 100) + + # 3. 
Analyze each border + # Left Edge (Dark -> Light transition => positive grad_x) + left_profile = np.sum(np.maximum(0, grad_x[:, :search_w]), axis=0) + left_coord = _analyze_profile(left_profile, prominence, min_width) + coords['left'] = left_coord if left_coord is not None else 0 + + # Right Edge (Light -> Dark transition => negative grad_x) + right_profile = np.sum(np.maximum(0, -grad_x[:, -search_w:]), axis=0) + right_coord = _analyze_profile(right_profile[::-1], prominence, min_width) + coords['right'] = (width - 1 - right_coord) if right_coord is not None else (width - 1) + + # Top Edge (Dark -> Light transition => positive grad_y) + top_profile = np.sum(np.maximum(0, grad_y[:search_h, :]), axis=1) + top_coord = _analyze_profile(top_profile, prominence, min_width) + coords['top'] = top_coord if top_coord is not None else 0 + + # Bottom Edge (Light -> Dark transition => negative grad_y) + bottom_profile = np.sum(np.maximum(0, -grad_y[-search_h:, :]), axis=1) + bottom_coord = _analyze_profile(bottom_profile[::-1], prominence, min_width) + coords['bottom'] = (height - 1 - bottom_coord) if bottom_coord is not None else (height - 1) + + l, t, r, b = map(int, [coords['left'], coords['top'], coords['right'], coords['bottom']]) + + if not (l < r and t < b): + print("Warning: Gradient border detection failed. Using full image.", file=sys.stderr) + film_base_color = np.median(image.reshape(-1, 3), axis=0) + return image, film_base_color + + print(f"Detected image box: (left, top, right, bottom) = ({l}, {t}, {r}, {b})") + + # 4. Sample film base color from the border regions + mask = np.zeros(image.shape[:2], dtype=bool) + mask[t:b+1, l:r+1] = True + border_pixels = image[~mask] + + if border_pixels.size == 0: + print("Warning: Border detected, but no pixels to sample. Using image median.", file=sys.stderr) + film_base_color = np.median(image[t:b+1, l:r+1].reshape(-1, 3), axis=0) + else: + film_base_color = np.median(border_pixels.reshape(-1, 3), axis=0) + + # 5. Crop and return + cropped_image = image[t:b+1, l:r+1] + return cropped_image, film_base_color + + + +def invert_negative(image, params): + """ + Inverts a negative image based on RawTherapee's filmnegative module logic. + """ + ref_in = params['refInput'] + 1e-9 # Add epsilon to avoid division by zero + ref_out = params['refOutput'] + + rexp = -(params['greenExp'] * params['redRatio']) + gexp = -params['greenExp'] + bexp = -(params['greenExp'] * params['blueRatio']) + + rmult = ref_out[0] / (ref_in[0] ** rexp) + gmult = ref_out[1] / (ref_in[1] ** gexp) + bmult = ref_out[2] / (ref_in[2] ** bexp) + + inverted_image = image.copy() + 1e-9 + + inverted_image[:, :, 0] = rmult * (inverted_image[:, :, 0] ** rexp) + inverted_image[:, :, 1] = gmult * (inverted_image[:, :, 1] ** gexp) + inverted_image[:, :, 2] = bmult * (inverted_image[:, :, 2] ** bexp) + + return np.clip(inverted_image, 0.0, None) + + +def negative_auto_exposure(image, target_percentile=50, highlight_percentile=99.5, highlight_preservation=0.85): + """ + Automatically adjusts the exposure of a negative image to maximize dynamic range while preserving highlights. 
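
+
+    Example (added note; illustrative numbers): with a median luminance of 0.09
+    and a 99.5th-percentile luminance of 0.90, the middle factor is
+    0.18 / 0.09 = 2.0 and the highlight factor is 0.9 / (0.90 * 2.0) = 0.5, so
+    with the default highlight_preservation of 0.85 the applied gain is
+    2.0 * 0.15 + (2.0 * 0.5) * 0.85 = 1.15 rather than the full 2.0.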
+ + Args: + image: Input image in linear RGB space (ACEScg) NEGATIVE + target_percentile: Percentile to target for middle exposure (default: 50) + highlight_percentile: Percentile to consider as highlights (default: 99.5) + highlight_preservation: Controls how much to preserve highlights (0-1, default: 0.85) + + Returns: + Adjusted image with optimized dynamic range + """ + # Calculate luminance using standard coefficients for linear light + luminance = 0.2126 * image[:,:,0] + 0.7152 * image[:,:,1] + 0.0722 * image[:,:,2] + + # Analyze histogram + hist_values = luminance.flatten() + + # Get key luminance values from histogram + mid_value = np.percentile(hist_values, target_percentile) + highlight_value = np.percentile(hist_values, highlight_percentile) + + # Target middle gray for optimal exposure (standard for scene-referred linear) + target_middle = 0.18 + + # Calculate exposure factor for middle values + middle_exposure_factor = target_middle / (mid_value + 1e-9) + + # Calculate highlight protection factor + highlight_target = 0.9 # Target highlights to be at 90% of range + highlight_exposure_factor = highlight_target / (highlight_value * middle_exposure_factor + 1e-9) + + # Blend between middle and highlight exposure factor based on preservation setting + final_exposure_factor = middle_exposure_factor * (1 - highlight_preservation) + \ + (middle_exposure_factor * highlight_exposure_factor) * highlight_preservation + + # Apply exposure adjustment + adjusted_image = image * final_exposure_factor + + return np.clip(adjusted_image, 0.0, None) # Ensure no negative values but allow overflow for highlights + + +def auto_exposure(image, target_percentile=50, highlight_percentile=99.5, highlight_preservation=0.85): + """ + Automatically adjusts the exposure of an image to maximize dynamic range while preserving highlights. 
+ + Args: + image: Input image in linear RGB space (ACEScg) + target_percentile: Percentile to target for middle exposure (default: 50) + highlight_percentile: Percentile to consider as highlights (default: 99.5) + highlight_preservation: Controls how much to preserve highlights (0-1, default: 0.85) + + Returns: + Adjusted image with optimized dynamic range + """ + # Calculate luminance using standard coefficients for linear light + luminance = 0.2126 * image[:,:,0] + 0.7152 * image[:,:,1] + 0.0722 * image[:,:,2] + + # Analyze histogram + hist_values = luminance.flatten() + + # Get key luminance values from histogram + mid_value = np.percentile(hist_values, target_percentile) + highlight_value = np.percentile(hist_values, highlight_percentile) + + # Target middle gray for optimal exposure (standard for scene-referred linear) + target_middle = 0.18 + + # Calculate exposure factor for middle values + middle_exposure_factor = target_middle / (mid_value + 1e-9) + + # Calculate highlight protection factor + highlight_target = 0.9 # Target highlights to be at 90% of range + highlight_exposure_factor = highlight_target / (highlight_value * middle_exposure_factor + 1e-9) + + # Blend between middle and highlight exposure factor based on preservation setting + final_exposure_factor = middle_exposure_factor * (1 - highlight_preservation) + \ + (middle_exposure_factor * highlight_exposure_factor) * highlight_preservation + + # Apply exposure adjustment + adjusted_image = image * final_exposure_factor + + # Apply subtle S-curve for enhanced contrast while preserving highlights + # Convert to log space for easier manipulation + log_image = np.log2(adjusted_image + 1e-9) + + # Apply soft contrast enhancement + contrast_strength = 0.15 + log_image = log_image * (1 + contrast_strength) - np.mean(log_image) * contrast_strength + + # Convert back to linear space + enhanced_image = np.power(2, log_image) + + # Ensure no negative values, but allow overflow for highlight processing later + return np.clip(enhanced_image, 0.0, None) + + +def auto_exposure_pec(linear_image, **kwargs): + """ + Implements Practical Exposure Correction (PEC) on a linear image. + Automatically determines the correction mode based on image brightness. + """ + params = { + 'K': kwargs.get('K', 3), + 'c_under': kwargs.get('c_under', 1.0), + 'c_over': kwargs.get('c_over', 0.6), + 'target_lum': kwargs.get('target_lum', 0.18) + } + + # Auto-detect mode + luminosity_weights = np.array([0.2126, 0.7152, 0.0722]) + mean_luminance = np.mean(np.dot(linear_image, luminosity_weights)) + + if mean_luminance < params['target_lum']: + mode, c = 'underexposure', params['c_under'] + op = np.add + print(f"Image appears underexposed (mean lum: {mean_luminance:.3f}). Applying PEC in '{mode}' mode.") + else: + mode, c = 'overexposure', params['c_over'] + op = np.subtract + print(f"Image appears overexposed (mean lum: {mean_luminance:.3f}). 
Applying PEC in '{mode}' mode.") + + y = linear_image.astype(np.float64) + adversarial_func = lambda z: c * z * (1 - z) + g_y = op(y, adversarial_func(y)) + x_k = g_y.copy() + + # The PEC iterative scheme (T=1, as recommended) + for _ in range(params['K']): + compensation = adversarial_func(x_k) + x_k = op(g_y, compensation) + + return x_k + + +def _rgb_to_yuv_huo(image_rgb: np.ndarray) -> np.ndarray: + """Converts RGB to the paper's specific YUV space.""" + matrix = np.array([[0.299, -0.299, 0.701], [0.587, -0.587, -0.587], [0.114, 0.886, -0.114]]) + return image_rgb.astype(np.float64) @ matrix + +def _k_function(error: float, a: float, b: float) -> float: + """Non-linear error weighting function K(x) from Eq. 16.""" + abs_error, sign = np.abs(error), np.sign(error) + if abs_error >= a: return 2.0 * sign + elif abs_error >= b: return 1.0 * sign + else: return 0.0 + +def white_balance_huo(image_float: np.ndarray, **kwargs): + """Performs iterative white balance based on the Huo et al. 2006 paper.""" + params = { + 't_threshold': kwargs.get('t_threshold', 0.1321), + 'mu': kwargs.get('mu', 0.0312), + 'a': kwargs.get('a', 0.8 / 255.0), + 'b': kwargs.get('b', 0.15 / 255.0), + 'max_iter': kwargs.get('max_iter', 16), + } + + gains = np.array([1.0, 1.0, 1.0], dtype=np.float64) + + print("Starting iterative white balance adjustment...") + for i in range(params['max_iter']): + balanced_image = np.clip(image_float * gains, 0.0, 1.0) + yuv_image = _rgb_to_yuv_huo(balanced_image) + Y, U, V = yuv_image[..., 0], yuv_image[..., 1], yuv_image[..., 2] + + luminance_mask = (Y > 0.1) & (Y < 0.95) + if not np.any(luminance_mask): + print(f"Iteration {i+1}: No pixels in valid luminance range. Stopping."); break + + gray_mask_indices = (np.abs(U[luminance_mask]) + np.abs(V[luminance_mask])) / Y[luminance_mask] < params['t_threshold'] + + gray_points_U = U[luminance_mask][gray_mask_indices] + gray_points_V = V[luminance_mask][gray_mask_indices] + + if gray_points_U.size < 100: + print(f"Iteration {i+1}: Not enough gray points found ({gray_points_U.size}). Stopping."); break + + u_mean, v_mean = np.mean(gray_points_U), np.mean(gray_points_V) + + if np.abs(u_mean) < params['b'] and np.abs(v_mean) < params['b']: + print(f"Iteration {i+1}: Converged. u_mean={u_mean:.4f}, v_mean={v_mean:.4f}"); break + + error, channel_idx, channel_name = (-u_mean, 2, "B") if np.abs(u_mean) > np.abs(v_mean) else (-v_mean, 0, "R") + adjustment = params['mu'] * _k_function(error, params['a'], params['b']) + gains[channel_idx] += adjustment + print(f"Iter {i+1:2d}: Adjusting {channel_name}-gain. u_mean={u_mean:.4f}, v_mean={v_mean:.4f}, Adj={adjustment:+.4f}") + + print(f"Final gains: R={gains[0]:.4f}, G={gains[1]:.4f}, B={gains[2]:.4f}") + return image_float * gains + + +def white_balance_gray_world(image): + """ + Performs white balancing using the Gray World assumption. + """ + r_avg, g_avg, b_avg = np.mean(image, axis=(0, 1)) + avg_lum = (r_avg + g_avg + b_avg) / 3.0 + + r_gain = avg_lum / (r_avg + 1e-9) + g_gain = avg_lum / (g_avg + 1e-9) + b_gain = avg_lum / (b_avg + 1e-9) + + wb_image = image.copy() + wb_image[:, :, 0] *= r_gain + wb_image[:, :, 1] *= g_gain + wb_image[:, :, 2] *= b_gain + + return wb_image + +# --- Main Execution --- + +def main(): + parser = argparse.ArgumentParser( + description="Converts a film negative to a positive image. 
Requires numpy, imageio, and colour-science.",
+        formatter_class=argparse.RawTextHelpFormatter
+    )
+    parser.add_argument('input_image', help="Path to the input negative image file (e.g., TIFF, PNG, JPG).")
+    parser.add_argument('output_image', help="Path to save the output positive image file.")
+    parser.add_argument('--border', action='store_true', help="Indicates the image has a film border to sample the base color from.")
+    parser.add_argument('--no-crop', dest='crop', action='store_false', help="Disables cropping of the film border when --border is used.")
+    parser.add_argument('--no-wb', dest='white_balance', action='store_false', help="Disables the automatic white balance step.")
+    parser.add_argument('--no-auto-exposure', dest='auto_exposure', action='store_false', help="Disables the automatic exposure adjustment step.")
+    parser.add_argument('--prominence', type=float, default=5.0, help="[Border] Peak prominence for edge detection.")
+    parser.add_argument('--awb-t', type=float, default=0.1321, help="[AWB] Gray point detection threshold `T`.")
+    parser.add_argument('--awb-mu', type=float, default=0.0312, help="[AWB] Adjustment step size `mu`.")
+    parser.add_argument('--pec-k', type=int, default=3, help="[Exposure] Number of inner loop iterations K.")
+    parser.add_argument('--pec-target-lum', type=float, default=0.18, help="[Exposure] Target middle gray for auto mode detection.")
+
+    args = parser.parse_args()
+
+    # 1. Load Image
+    try:
+        image_raw = iio.imread(args.input_image)
+    except FileNotFoundError:
+        print(f"Error: Input file not found at {args.input_image}", file=sys.stderr)
+        return
+
+    # Convert to float (0.0 to 1.0) for processing
+    if image_raw.dtype == np.uint16:
+        image_fp = image_raw.astype(np.float32) / 65535.0
+    elif image_raw.dtype == np.uint8:
+        image_fp = image_raw.astype(np.float32) / 255.0
+    else:  # Handle other types like float
+        image_fp = image_raw.astype(np.float32)
+        if image_fp.max() > 1.0:
+            image_fp /= image_fp.max()
+
+    # Handle grayscale and alpha channels using numpy
+    if image_fp.ndim == 2:
+        print("Input is grayscale, converting to RGB.", file=sys.stderr)
+        image_fp = np.stack((image_fp,) * 3, axis=-1)
+    if image_fp.shape[2] == 4:
+        print("Input has alpha channel, removing it for processing.", file=sys.stderr)
+        image_fp = image_fp[:, :, :3]
+
+    # 2. Convert to ACEScg and determine the film base color
+    image_to_process = None
+    if args.border:
+        print("Using gradient border detection...")
+        cropped_image, film_base_color = detect_and_crop_border_gradient(image_fp, prominence=args.prominence)
+        if args.crop:
+            print("Cropping border...")
+            image_fp = cropped_image
+        print("Converting to ACEScg...")
+        image_aces = to_acescg(image_fp, 'sRGB')
+        image_to_process = image_aces
+
+    else:
+        print("No border specified, using image median for base color...")
+        print("Converting to ACEScg...")
+        image_aces = to_acescg(image_fp, 'sRGB')
+        image_to_process = image_aces
+        h, w, _ = image_to_process.shape
+        center_crop = image_to_process[h//4:3*h//4, w//4:3*w//4, :]
+        film_base_color = np.median(center_crop.reshape(-1, 3), axis=0)
+
+    print(f"Detected film base color (ACEScg): {film_base_color}")
+
+    # 3. Invert Negative
+    print("Inverting negative...")
+    inversion_params = {
+        'greenExp': 1.5,
+        'redRatio': 2.04 / 1.5,
+        'blueRatio': 1.29 / 1.5,
+        'refInput': film_base_color,
+        'refOutput': np.array([0.05, 0.05, 0.05])
+    }
+
+    positive_image = invert_negative(image_to_process, inversion_params)
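
+
+    # Added sanity-check sketch (not part of the original flow): by construction
+    # of the multipliers in invert_negative(), a pixel equal to the film base
+    # color ('refInput') maps to 'refOutput', since mult = ref_out / ref_in**exp
+    # and mult * ref_in**exp == ref_out (up to the small epsilons added inside).
+    #   base_check = invert_negative(film_base_color.reshape(1, 1, 3), inversion_params)
+    #   assert np.allclose(base_check, inversion_params['refOutput'], atol=1e-3)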

+    # 4. Auto Exposure Adjustment
+    if args.auto_exposure:
+        print("Applying automatic exposure adjustment...")
+        positive_image = auto_exposure_pec(positive_image, K=args.pec_k, target_lum=args.pec_target_lum)
+
+    # 5. White Balance
+    if args.white_balance:
+        print("Applying white balance...")
+        # NOTE: awb_params is only consumed by white_balance_huo(); the gray-world
+        # variant used below takes no parameters.
+        awb_params = {'t_threshold': args.awb_t, 'mu': args.awb_mu}
+        positive_image = white_balance_gray_world(positive_image)
+
+    # 6. Convert back from ACEScg
+    print("Converting from ACEScg to sRGB for output...")
+    output_image_srgb = from_acescg(positive_image, 'sRGB')
+    output_image_srgb = np.clip(output_image_srgb, 0.0, 1.0)
+
+    # 7. Save to file
+    output_extension = args.output_image.lower().split('.')[-1]
+    if output_extension in ['tif', 'tiff']:
+        print("Saving as 16-bit TIFF.")
+        final_image = (output_image_srgb * 65535.0).astype(np.uint16)
+    else:
+        print("Saving as 8-bit image.")
+        final_image = (output_image_srgb * 255.0).astype(np.uint8)
+
+    iio.imwrite(args.output_image, final_image)
+    print(f"Successfully saved positive image to {args.output_image}")
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/filmscanv3.py b/filmscanv3.py
new file mode 100755
index 0000000..e405f73
--- /dev/null
+++ b/filmscanv3.py
@@ -0,0 +1,242 @@
+#!/usr/bin/env -S uv run --script
+# /// script
+# dependencies = [
+#     "numpy",
+#     "scipy",
+#     "Pillow",
+#     "imageio",
+#     "colour-science",
+#     "tifffile",
+# ]
+# ///
+
+import argparse
+import time
+import sys
+import imageio.v2 as imageio
+import numpy as np
+import colour
+
+# --- Helper Functions ---
+
+def find_film_base_from_border(linear_image: np.ndarray) -> np.ndarray:
+    """
+    Finds the film base color by sampling the outer 2% border of the image.
+
+    This function implements the --border logic. It randomly samples 64 patches
+    of 8x8 pixels from the defined border area, calculates the mean color of each
+    patch, and returns the median of these mean colors. The median is used because
+    it is more robust to outliers (like dust or scratches) than the mean.
+
+    Args:
+        linear_image: The image data in a linear RGB color space (float32).
+
+    Returns:
+        A 1D NumPy array representing the film base color [R, G, B].
+    """
+    print("Finding film base color from image border...")
+    h, w, _ = linear_image.shape
+    border_px_h = int(h * 0.02)
+    border_px_w = int(w * 0.02)
+
+    # Define four rectangular regions for the border
+    regions = [
+        (0, 0, h, border_px_w),  # Left
+        (0, w - border_px_w, h, w),  # Right
+        (0, 0, border_px_h, w),  # Top
+        (h - border_px_h, 0, h, w),  # Bottom
+    ]
+
+    patch_size = 8
+    num_patches = 64
+    patch_means = []
+
+    for _ in range(num_patches):
+        # Pick a random region and sample a patch from it
+        top, left, bottom, right = regions[np.random.randint(4)]
+
+        # Ensure we don't sample outside the bounds
+        if (bottom - top <= patch_size) or (right - left <= patch_size):
+            continue
+
+        rand_y = np.random.randint(top, bottom - patch_size)
+        rand_x = np.random.randint(left, right - patch_size)
+
+        patch = linear_image[rand_y:rand_y + patch_size, rand_x:rand_x + patch_size]
+        patch_means.append(np.mean(patch, axis=(0, 1)))
+
+    if not patch_means:
+        raise ValueError("Could not sample any patches from the border. 
Check image dimensions and border size.") + + # Median is more robust to outliers (dust, scratches) than mean + film_base_color = np.median(patch_means, axis=0) + return film_base_color + +def find_film_base_from_darkest(linear_image: np.ndarray) -> np.ndarray: + """ + Finds the film base color by identifying the darkest pixels in the negative. + + This function implements the default logic. It calculates the luminance of + the image, identifies the 0.1% darkest pixels, and returns their average color. + + Args: + linear_image: The image data in a linear RGB color space (float32). + + Returns: + A 1D NumPy array representing the film base color [R, G, B]. + """ + print("Finding film base color from darkest part of the image...") + # Using Rec.709 coefficients for luminance calculation is a standard approach + luminance = colour.RGB_luminance(linear_image, primaries='ITU-R BT.709', whitepoint='D65') + + # Find the threshold for the darkest 0.1% of pixels + darkest_threshold = np.percentile(luminance, 0.1) + darkest_pixels_mask = luminance <= darkest_threshold + + # If no pixels are found (unlikely), relax the threshold + if not np.any(darkest_pixels_mask): + darkest_pixels_mask = luminance <= np.percentile(luminance, 1) + + darkest_pixels = linear_image[darkest_pixels_mask] + film_base_color = np.mean(darkest_pixels, axis=0) + return film_base_color + +# --- Main Processing Function --- + +def main(): + parser = argparse.ArgumentParser( + description="A high-performance tool for inverting color negative film scans.", + formatter_class=argparse.RawTextHelpFormatter + ) + parser.add_argument("input_file", help="Path to the 16-bit input TIFF file (sRGB).") + parser.add_argument("output_file", help="Path to save the 16-bit output TIFF file.") + + method_group = parser.add_mutually_exclusive_group() + method_group.add_argument( + "--border", + action="store_true", + help="Find film base color by sampling the outer 2%% of the image border." + ) + method_group.add_argument( + "--color", + type=str, + help="Specify film base color as a CIEXYZ value (e.g., '0.41,0.35,0.18').\nAssumes D65 standard illuminant." + ) + + parser.add_argument( + "--wide", + action="store_true", + help="Output in a wide gamut color space (Rec.2020) instead of sRGB." + ) + + if len(sys.argv) == 1: + parser.print_help(sys.stderr) + sys.exit(1) + + args = parser.parse_args() + + start_time = time.time() + + # 1. Read Image and Normalize + print(f"Reading image: {args.input_file}") + try: + # ImageIO reads TIFFs as (height, width, channels) + img_int16 = imageio.imread(args.input_file) + except FileNotFoundError: + print(f"Error: Input file not found at {args.input_file}") + sys.exit(1) + + if img_int16.dtype != np.uint16: + print("Warning: Input image is not 16-bit. Results may have reduced quality.") + + # Convert to float (0.0 - 1.0) for processing + img_float = img_int16.astype(np.float32) / 65535.0 + + # 2. Color Space Conversion (to Linear) + # The input is assumed to be in standard (non-linear) sRGB space. + # All our math must happen in a linear space. + print("Converting from sRGB to linear sRGB...") + srgb_colourspace = colour.models.RGB_COLOURSPACE_sRGB + linear_image = colour.cctf_decoding(img_float, function='sRGB') + + # 3. 
Determine Film Base Color + film_base_color = None + if args.color: + print(f"Using provided CIEXYZ color: {args.color}") + try: + xyz_values = np.array([float(x.strip()) for x in args.color.split(',')]) + print(f"Parsed XYZ values: {xyz_values}") + if xyz_values.shape != (3,): + print("Error: --color must be in the format 'X,Y,Z' with three values.") + raise ValueError + # Convert the provided XYZ color to our working linear sRGB space + film_base_color = colour.XYZ_to_RGB(xyz_values, 'sRGB') + except (ValueError, IndexError) as e: + print("Error: Invalid --color format. Please use 'X,Y,Z', e.g., '0.41,0.35,0.18'") + print(e) + sys.exit(1) + + elif args.border: + film_base_color = find_film_base_from_border(linear_image) + else: + # Default method if neither --border nor --color is specified + film_base_color = find_film_base_from_darkest(linear_image) + + print(f"Determined Film Base Color (Linear RGB): {np.round(film_base_color, 4)}") + # Ensure base color is not black to avoid division by zero + if np.any(film_base_color <= 1e-8): + print("Error: Determined film base color is too dark or black, cannot proceed.") + sys.exit(1) + + # 4. Core Inversion Process + print("Performing inversion...") + # Step A: Remove the film mask by dividing by the base color. + # This is equivalent to Photoshop's 'Divide' blend mode. + # It "white balances" the image against the orange mask. + masked_removed = linear_image / film_base_color + + # Step B: Invert the image. Based on the principle that exposure = 1 / transmittance. + # Add a small epsilon to prevent division by zero in pure black areas. + epsilon = 1e-8 + inverted_image = 1.0 / (masked_removed + epsilon) + + # 5. Normalize for Output + # The inverted values are unbounded. We must normalize them into the 0-1 range. + # This is a technical step, not an artistic one like levels. It preserves tonal relationships. + max_val = np.percentile(inverted_image, 99.95) # Avoid blowing out specular highlights + if max_val <= epsilon: + print("Warning: Inverted image is black. Result will be black.") + normalized_linear_positive = np.zeros_like(inverted_image) + else: + normalized_linear_positive = inverted_image / max_val + + # Clip any remaining >1 values + normalized_linear_positive = np.clip(normalized_linear_positive, 0.0, 1.0) + + # 6. Color Space Conversion (to Output Space) + output_space_name = "Rec.2020" if args.wide else "sRGB" + print(f"Converting linear positive to target space ({output_space_name})...") + + if args.wide: + # Convert from linear sRGB primaries to linear Rec.2020 primaries + wide_linear = colour.RGB_to_RGB( + normalized_linear_positive, srgb_colourspace, colour.models.RGB_COLOURSPACE_BT2020 + ) + # Apply the Rec.2020 gamma curve + final_image_float = colour.cctf_encoding(wide_linear, function='ITU-R BT.2020') + else: + # Just apply the sRGB gamma curve + final_image_float = colour.cctf_encoding(normalized_linear_positive, function='sRGB') + + # 7. 
De-normalize and Save + print(f"Saving to {args.output_file}") + # Convert from float (0-1) back to 16-bit integer (0-65535) + output_image = (np.clip(final_image_float, 0.0, 1.0) * 65535).astype(np.uint16) + + imageio.imwrite(args.output_file, output_image) + + end_time = time.time() + print(f"\nProcessing complete in {end_time - start_time:.2f} seconds.") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/hdrtest.py b/hdrtest.py new file mode 100644 index 0000000..5914cbf --- /dev/null +++ b/hdrtest.py @@ -0,0 +1,36 @@ +import numpy as np +import imageio.v3 as iio + +# --- Parameters --- +WIDTH, HEIGHT = 1024, 1024 +CENTER_X, CENTER_Y = WIDTH // 2, HEIGHT // 2 +RADIUS = 150 +BACKGROUND_COLOR = 0.0 # Pure black +# This is the key change: A "super-white" HDR value for the circle. +# A value of 5.0 simulates a very bright light source. +CIRCLE_BRIGHTNESS = 5.0 +OUTPUT_FILENAME = "halation_test_hdr.tiff" + +# --- Generate the image --- +# Create coordinate grids +y, x = np.mgrid[:HEIGHT, :WIDTH] + +# Calculate distance from the center +distance = np.sqrt((x - CENTER_X)**2 + (y - CENTER_Y)**2) + +# Create a circular mask +mask = distance <= RADIUS + +# Create a 3-channel float image +# Use float32, as it's a standard for HDR images +image_hdr = np.full((HEIGHT, WIDTH, 3), BACKGROUND_COLOR, dtype=np.float32) + +# Set the circle area to the super-white value +image_hdr[mask] = [CIRCLE_BRIGHTNESS, CIRCLE_BRIGHTNESS, CIRCLE_BRIGHTNESS] + +# --- Save the image --- +# Save as a 32-bit float TIFF to preserve the HDR values +iio.imwrite(OUTPUT_FILENAME, image_hdr) + +print(f"✅ Saved HDR test image to '{OUTPUT_FILENAME}'") +print(f" Use this file as the input for your film simulation script.") \ No newline at end of file diff --git a/poster.py b/poster.py new file mode 100644 index 0000000..8d8b655 --- /dev/null +++ b/poster.py @@ -0,0 +1,138 @@ +import argparse +import sys +from PIL import Image, ImageDraw + +def create_full_poster(input_path: str, output_path: str): + """ + Generates a poster from a high-resolution source image. + + The poster features a 50% resolution version of the main image at the top, + with a complete 3x3 grid of all nine "rule of thirds" 250% zoom patches below it. + """ + try: + # Open the high-resolution source image + with Image.open(input_path) as original_image: + original_image = original_image.convert("RGB") + orig_width, orig_height = original_image.size + print(f"Loaded input image: {input_path} ({orig_width}x{orig_height})") + + # --- 1. Define Sizes --- + + # Central image is 50% of original + main_image_width = orig_width // 2 + main_image_height = orig_height // 2 + + # Crop area for patches is 1/6th of original width + patch_crop_width = orig_width // 6 + patch_crop_height = int(patch_crop_width * (orig_height / orig_width)) + + # Patches are zoomed to 250% of their cropped size + zoom_factor = 2.5 + zoomed_patch_width = int(patch_crop_width * zoom_factor) + zoomed_patch_height = int(patch_crop_height * zoom_factor) + + # Define padding values + patch_padding = 25 # Padding between patches in the grid + section_padding = 50 # Padding between the main image and the grid + canvas_padding = 50 # Padding around the entire content + + # --- 2. 
Calculate Layout & Create Canvas --- + + # Calculate the dimensions of the 3x3 patch grid + grid_width = (zoomed_patch_width * 3) + (patch_padding * 2) + grid_height = (zoomed_patch_height * 3) + (patch_padding * 2) + + # Calculate total canvas dimensions + canvas_width = max(main_image_width, grid_width) + (canvas_padding * 2) + canvas_height = main_image_height + grid_height + section_padding + (canvas_padding * 2) + + # Create the blank white canvas + canvas = Image.new('RGB', (canvas_width, canvas_height), 'white') + draw = ImageDraw.Draw(canvas) + print(f"Created poster canvas of size: {canvas_width}x{canvas_height}") + + # --- 3. Place Main Image & Draw Highlights --- + + # Create the 50% resolution main image + main_image = original_image.resize((main_image_width, main_image_height), Image.Resampling.LANCZOS) + + # Position and paste the main image in the top section + main_image_x = (canvas_width - main_image_width) // 2 + main_image_y = canvas_padding + canvas.paste(main_image, (main_image_x, main_image_y)) + + # Define the center points for the Rule of Thirds grid on the ORIGINAL image + thirds_points = [ + (orig_width // 6, orig_height // 6), (orig_width // 2, orig_height // 6), (5 * orig_width // 6, orig_height // 6), + (orig_width // 6, orig_height // 2), (orig_width // 2, orig_height // 2), (5 * orig_width // 6, orig_height // 2), + (orig_width // 6, 5 * orig_height // 6), (orig_width // 2, 5 * orig_height // 6), (5 * orig_width // 6, 5 * orig_height // 6), + ] + + # Draw all 9 highlight boxes on the main image + print("Drawing highlight boxes on main image...") + for center_x, center_y in thirds_points: + crop_left = center_x - (patch_crop_width // 2) + crop_top = center_y - (patch_crop_height // 2) + # Scale coordinates to the 50% main image and add its offset + highlight_x1 = main_image_x + int(crop_left * 0.5) + highlight_y1 = main_image_y + int(crop_top * 0.5) + highlight_x2 = highlight_x1 + int(patch_crop_width * 0.5) + highlight_y2 = highlight_y1 + int(patch_crop_height * 0.5) + draw.rectangle((highlight_x1, highlight_y1, highlight_x2, highlight_y2), outline="red", width=4) + + # --- 4. Create and Place all 9 Patches in a Grid --- + + print("Generating and placing 9 zoomed patches in a grid...") + grid_origin_x = (canvas_width - grid_width) // 2 + grid_origin_y = main_image_y + main_image_height + section_padding + + for i, (center_x, center_y) in enumerate(thirds_points): + # Define the crop box on the ORIGINAL image + crop_box = ( + center_x - patch_crop_width // 2, center_y - patch_crop_height // 2, + center_x + patch_crop_width // 2, center_y + patch_crop_height // 2 + ) + + # Crop the patch and zoom it + patch = original_image.crop(crop_box) + zoomed_patch = patch.resize((zoomed_patch_width, zoomed_patch_height), Image.Resampling.LANCZOS) + + # Determine the patch's position in the 3x3 grid + row, col = divmod(i, 3) + patch_x = grid_origin_x + col * (zoomed_patch_width + patch_padding) + patch_y = grid_origin_y + row * (zoomed_patch_height + patch_padding) + + # Paste the patch and draw a border + canvas.paste(zoomed_patch, (patch_x, patch_y)) + draw.rectangle( + (patch_x, patch_y, patch_x + zoomed_patch_width, patch_y + zoomed_patch_height), + outline="black", + width=2 + ) + + # --- 5. Save the Final Poster --- + canvas.save(output_path, quality=95) + print(f"\nSuccess! 
Poster saved to: {output_path}") + + except FileNotFoundError: + print(f"Error: The input file was not found at '{input_path}'", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"An unexpected error occurred: {e}", file=sys.stderr) + sys.exit(1) + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Create a poster with a main image and a full 3x3 grid of zoomed-in 'rule of thirds' patches.", + formatter_class=argparse.RawTextHelpFormatter, + epilog="Example:\n python create_poster_v2.py my_photo.jpg poster_result.png" + ) + parser.add_argument("input", help="Path to the high-resolution input image.") + parser.add_argument("output", help="Path for the generated poster image.") + + if len(sys.argv) == 1: + parser.print_help(sys.stderr) + sys.exit(1) + + args = parser.parse_args() + create_full_poster(args.input, args.output) \ No newline at end of file diff --git a/posterv2.py b/posterv2.py new file mode 100644 index 0000000..dd08840 --- /dev/null +++ b/posterv2.py @@ -0,0 +1,167 @@ +import argparse +import sys +from PIL import Image, ImageDraw + +def create_comparison_poster(input_path1: str, input_path2: str, output_path: str): + """ + Generates a comparison poster from two high-resolution source images. + + The poster features 50% resolution versions of both main images side-by-side at the top. + Below, it shows a complete 3x3 grid of "rule of thirds" patches. Each grid cell + contains a side-by-side comparison of the 250% zoomed patch from both images. + """ + try: + # Open the two high-resolution source images + with Image.open(input_path1) as original_image1, Image.open(input_path2) as original_image2: + original_image1 = original_image1.convert("RGB") + original_image2 = original_image2.convert("RGB") + + # --- Ensure images are the same size for consistent cropping --- + orig_width, orig_height = original_image1.size + if original_image1.size != original_image2.size: + print(f"Warning: Image sizes differ. Resizing second image from {original_image2.size} to {original_image1.size}.") + original_image2 = original_image2.resize(original_image1.size, Image.Resampling.LANCZOS) + + print(f"Loaded input image 1: {input_path1} ({orig_width}x{orig_height})") + print(f"Loaded input image 2: {input_path2} ({orig_width}x{orig_height})") + + # --- 1. Define Sizes --- + + # Main images are 50% of original + main_image_width = orig_width // 2 + main_image_height = orig_height // 2 + + # Crop area for patches is 1/6th of original width + patch_crop_width = orig_width // 6 + patch_crop_height = int(patch_crop_width * (orig_height / orig_width)) + + # Patches are zoomed to 250% of their cropped size + zoom_factor = 2.5 + zoomed_patch_width = int(patch_crop_width * zoom_factor) + zoomed_patch_height = int(patch_crop_height * zoom_factor) + + # Define padding values + comparison_padding = 10 # Padding between the two patches in a pair + patch_padding = 25 # Padding between patch pairs in the grid + section_padding = 50 # Padding between main images and the grid + canvas_padding = 50 # Padding around the entire content + + # --- 2. 
Calculate Layout & Create Canvas --- + + # Calculate the dimensions of the top section (two main images side-by-side) + top_section_width = (main_image_width * 2) + section_padding + + # Calculate the dimensions of a single side-by-side comparison patch pair + comparison_pair_width = (zoomed_patch_width * 2) + comparison_padding + + # Calculate the dimensions of the full 3x3 patch grid + grid_width = (comparison_pair_width * 3) + (patch_padding * 2) + grid_height = (zoomed_patch_height * 3) + (patch_padding * 2) + + # Calculate total canvas dimensions + canvas_width = max(top_section_width, grid_width) + (canvas_padding * 2) + canvas_height = main_image_height + grid_height + section_padding + (canvas_padding * 2) + + # Create the blank white canvas + canvas = Image.new('RGB', (canvas_width, canvas_height), 'white') + draw = ImageDraw.Draw(canvas) + print(f"Created poster canvas of size: {canvas_width}x{canvas_height}") + + # --- 3. Place Main Images & Draw Highlights --- + + # Create the 50% resolution main images + main_image1 = original_image1.resize((main_image_width, main_image_height), Image.Resampling.LANCZOS) + main_image2 = original_image2.resize((main_image_width, main_image_height), Image.Resampling.LANCZOS) + + # Position and paste the main images in the top section + top_section_x_start = (canvas_width - top_section_width) // 2 + main_image_y = canvas_padding + main_image1_x = top_section_x_start + main_image2_x = top_section_x_start + main_image_width + section_padding + + canvas.paste(main_image1, (main_image1_x, main_image_y)) + canvas.paste(main_image2, (main_image2_x, main_image_y)) + + # Define the center points for the Rule of Thirds grid on the ORIGINAL image + thirds_points = [ + (orig_width // 6, orig_height // 6), (orig_width // 2, orig_height // 6), (5 * orig_width // 6, orig_height // 6), + (orig_width // 6, orig_height // 2), (orig_width // 2, orig_height // 2), (5 * orig_width // 6, orig_height // 2), + (orig_width // 6, 5 * orig_height // 6), (orig_width // 2, 5 * orig_height // 6), (5 * orig_width // 6, 5 * orig_height // 6), + ] + + # Draw all 9 highlight boxes on BOTH main images + print("Drawing highlight boxes on main images...") + for main_img_x_offset in [main_image1_x, main_image2_x]: + for center_x, center_y in thirds_points: + crop_left = center_x - (patch_crop_width // 2) + crop_top = center_y - (patch_crop_height // 2) + # Scale coordinates to the 50% main image and add its offset + hl_x1 = main_img_x_offset + int(crop_left * 0.5) + hl_y1 = main_image_y + int(crop_top * 0.5) + hl_x2 = hl_x1 + int(patch_crop_width * 0.5) + hl_y2 = hl_y1 + int(patch_crop_height * 0.5) + draw.rectangle((hl_x1, hl_y1, hl_x2, hl_y2), outline="red", width=4) + + # --- 4. 
Create and Place all 9 Comparison Patch Pairs in a Grid --- + + print("Generating and placing 9 zoomed patch pairs in a grid...") + grid_origin_x = (canvas_width - grid_width) // 2 + grid_origin_y = main_image_y + main_image_height + section_padding + + for i, (center_x, center_y) in enumerate(thirds_points): + # Define the crop box on the ORIGINAL images (it's the same for both) + crop_box = ( + center_x - patch_crop_width // 2, center_y - patch_crop_height // 2, + center_x + patch_crop_width // 2, center_y + patch_crop_height // 2 + ) + + # Crop the patch from each image and zoom it + patch1 = original_image1.crop(crop_box) + zoomed_patch1 = patch1.resize((zoomed_patch_width, zoomed_patch_height), Image.Resampling.LANCZOS) + + patch2 = original_image2.crop(crop_box) + zoomed_patch2 = patch2.resize((zoomed_patch_width, zoomed_patch_height), Image.Resampling.LANCZOS) + + # Determine the patch pair's position in the 3x3 grid + row, col = divmod(i, 3) + pair_x_start = grid_origin_x + col * (comparison_pair_width + patch_padding) + patch_y = grid_origin_y + row * (zoomed_patch_height + patch_padding) + + # Calculate individual patch coordinates within the pair + patch1_x = pair_x_start + patch2_x = pair_x_start + zoomed_patch_width + comparison_padding + + # Paste the patches and draw borders + canvas.paste(zoomed_patch1, (patch1_x, patch_y)) + draw.rectangle((patch1_x, patch_y, patch1_x + zoomed_patch_width, patch_y + zoomed_patch_height), outline="black", width=2) + + canvas.paste(zoomed_patch2, (patch2_x, patch_y)) + draw.rectangle((patch2_x, patch_y, patch2_x + zoomed_patch_width, patch_y + zoomed_patch_height), outline="black", width=2) + + # --- 5. Save the Final Poster --- + canvas.save(output_path, quality=95) + print(f"\nSuccess! Comparison poster saved to: {output_path}") + + except FileNotFoundError as e: + print(f"Error: An input file was not found. 
Details: {e}", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"An unexpected error occurred: {e}", file=sys.stderr) + sys.exit(1) + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Create a side-by-side comparison poster from two images, featuring main views and a 3x3 grid of zoomed-in 'rule of thirds' patches.", + formatter_class=argparse.RawTextHelpFormatter, + epilog="Example:\n python create_comparison_poster.py image_A.jpg image_B.jpg comparison_result.png" + ) + parser.add_argument("input1", help="Path to the first high-resolution input image (Image A).") + parser.add_argument("input2", help="Path to the second high-resolution input image (Image B).") + parser.add_argument("output", help="Path for the generated comparison poster image.") + + if len(sys.argv) < 4: + parser.print_help(sys.stderr) + sys.exit(1) + + args = parser.parse_args() + create_comparison_poster(args.input1, args.input2, args.output) \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 9d5c67d..7102e5a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,14 +5,27 @@ description = "Add your description here" readme = "README.md" requires-python = ">=3.13" dependencies = [ - "colour-science>=0.4.6", - "imageio>=2.37.0", - "jupyter>=1.1.1", - "jupyterlab>=4.4.3", - "numpy>=2.2.6", - "pillow>=11.2.1", - "pyfftw>=0.15.0", - "rawpy>=0.25.0", - "scipy>=1.15.3", - "warp-lang>=1.7.2", + "colour-science>=0.4.6", + "imageio>=2.37.0", + "jupyter>=1.1.1", + "jupyterlab>=4.4.3", + "matplotlib>=3.10.3", + "numpy>=2.2.6", + "opencv-python>=4.11.0.86", + "pillow>=11.2.1", + "pyfftw>=0.15.0", + "rawpy>=0.25.0", + "scikit-image>=0.25.2", + "scipy>=1.15.3", + "torch>=2.7.1", + "warp-lang>=1.7.2", ] + + +[tool.uv.sources] +torch = [{ index = "pytorch-cu128" }] + +[[tool.uv.index]] +name = "pytorch-cu128" +url = "https://download.pytorch.org/whl/cu128" +explicit = true diff --git a/sim_data/ektar_100.json b/sim_data/ektar_100.json index 32427b8..48081f2 100644 --- a/sim_data/ektar_100.json +++ b/sim_data/ektar_100.json @@ -1,6 +1,6 @@ { "info": { - "name": "Ektar 100", + "name": "Ektar", "description": "KODAK PROFESSIONAL EKTAR 100 Film is the world's finest grain color negative film. With ISO 100 speed, high saturation and ultra-vivid color, this film offers the finest, smoothest grain of any color negative film available today. An ideal choice for commercial photographers and advanced amateurs, KODAK PROFESSIONAL EKTAR 100 Film is recommended for applications such as nature, travel and outdoor photography, as well as for fashion and product photography.", "format_mm": 35, "version": "1.0.0" diff --git a/sim_data/gold_1000.json b/sim_data/gold_1000.json new file mode 100644 index 0000000..ee6b3b2 --- /dev/null +++ b/sim_data/gold_1000.json @@ -0,0 +1,394 @@ +{ + "info": { + "name": "Gold", + "description": "KODAK ROYAL GOLD 1000 Film with its high sharpness and good grain is intended for low-light situations or subjects that require higher shutter speeds to stop action. It also allows you to use high shutter speeds for hand-holding telephoto lenses, or small apertures for increasing depth of field. Its improved sensitivity to tungsten light will provide pleasing results in situations where the lighting is difficult to meter. 
Although the film is balanced for exposure with daylight or electronic flash, you can also expose it with most existing light sources without filters.", + "format_mm": 35, + "version": "1.0.0" + }, + "processing": { + "gamma": { + "r_factor": 1.0, + "g_factor": 1.0, + "b_factor": 1.0 + }, + "balance": { + "r_shift": 0.0, + "g_shift": 0.0, + "b_shift": 0.0 + } + }, + "properties": { + "calibration": { + "iso": 1000, + "middle_gray_logE": -1.14 + }, + "halation": { + "strength": { + "r": 0.028, + "g": 0.014, + "b": 0.004 + }, + "size_um": { + "r": 400.0, + "g": 200.0, + "b": 100.0 + } + }, + "couplers": { + "saturation_amount": 1.0, + "dir_amount_rgb": [0.7, 0.9, 0.5], + "dir_diffusion_um": 15.0, + "dir_diffusion_interlayer": 1.5 + }, + "interlayer": { + "diffusion_um": 3.33 + }, + "curves": { + "hd": [{"d":-3.86,"b":0.92,"g":0.77,"r":0.34}, + {"d":-3.82,"b":0.92,"g":0.77,"r":0.34}, + {"d":-3.77,"b":0.92,"g":0.77,"r":0.34}, + {"d":-3.73,"b":0.92,"g":0.77,"r":0.34}, + {"d":-3.68,"b":0.92,"g":0.77,"r":0.34}, + {"d":-3.64,"b":0.92,"g":0.77,"r":0.34}, + {"d":-3.59,"b":0.92,"g":0.77,"r":0.34}, + {"d":-3.55,"b":0.92,"g":0.77,"r":0.34}, + {"d":-3.51,"b":0.92,"g":0.77,"r":0.34}, + {"d":-3.46,"b":0.92,"g":0.77,"r":0.35}, + {"d":-3.41,"b":0.92,"g":0.77,"r":0.35}, + {"d":-3.37,"b":0.93,"g":0.77,"r":0.35}, + {"d":-3.33,"b":0.93,"g":0.78,"r":0.35}, + {"d":-3.28,"b":0.94,"g":0.78,"r":0.36}, + {"d":-3.24,"b":0.95,"g":0.78,"r":0.36}, + {"d":-3.19,"b":0.96,"g":0.79,"r":0.37}, + {"d":-3.15,"b":0.97,"g":0.8,"r":0.38}, + {"d":-3.1,"b":0.99,"g":0.81,"r":0.39}, + {"d":-3.06,"b":1.01,"g":0.82,"r":0.4}, + {"d":-3.01,"b":1.02,"g":0.83,"r":0.41}, + {"d":-2.97,"b":1.05,"g":0.85,"r":0.43}, + {"d":-2.92,"b":1.07,"g":0.86,"r":0.44}, + {"d":-2.88,"b":1.1,"g":0.88,"r":0.46}, + {"d":-2.83,"b":1.13,"g":0.9,"r":0.48}, + {"d":-2.79,"b":1.16,"g":0.92,"r":0.5}, + {"d":-2.74,"b":1.19,"g":0.95,"r":0.52}, + {"d":-2.7,"b":1.22,"g":0.97,"r":0.55}, + {"d":-2.65,"b":1.25,"g":1,"r":0.57}, + {"d":-2.61,"b":1.28,"g":1.02,"r":0.59}, + {"d":-2.56,"b":1.32,"g":1.05,"r":0.61}, + {"d":-2.52,"b":1.35,"g":1.08,"r":0.64}, + {"d":-2.47,"b":1.38,"g":1.1,"r":0.66}, + {"d":-2.43,"b":1.41,"g":1.13,"r":0.69}, + {"d":-2.38,"b":1.45,"g":1.16,"r":0.71}, + {"d":-2.34,"b":1.48,"g":1.18,"r":0.74}, + {"d":-2.29,"b":1.51,"g":1.21,"r":0.77}, + {"d":-2.25,"b":1.54,"g":1.24,"r":0.79}, + {"d":-2.2,"b":1.57,"g":1.26,"r":0.82}, + {"d":-2.16,"b":1.6,"g":1.29,"r":0.84}, + {"d":-2.11,"b":1.64,"g":1.32,"r":0.87}, + {"d":-2.07,"b":1.67,"g":1.35,"r":0.89}, + {"d":-2.02,"b":1.7,"g":1.37,"r":0.92}, + {"d":-1.98,"b":1.73,"g":1.4,"r":0.95}, + {"d":-1.93,"b":1.77,"g":1.43,"r":0.97}, + {"d":-1.89,"b":1.8,"g":1.46,"r":1}, + {"d":-1.84,"b":1.83,"g":1.49,"r":1.03}, + {"d":-1.8,"b":1.86,"g":1.51,"r":1.05}, + {"d":-1.75,"b":1.9,"g":1.54,"r":1.08}, + {"d":-1.71,"b":1.93,"g":1.57,"r":1.11}, + {"d":-1.66,"b":1.96,"g":1.6,"r":1.13}, + {"d":-1.62,"b":1.99,"g":1.63,"r":1.16}, + {"d":-1.57,"b":2.03,"g":1.66,"r":1.19}, + {"d":-1.53,"b":2.06,"g":1.68,"r":1.22}, + {"d":-1.48,"b":2.09,"g":1.71,"r":1.24}, + {"d":-1.44,"b":2.13,"g":1.74,"r":1.27}, + {"d":-1.39,"b":2.16,"g":1.77,"r":1.3}, + {"d":-1.35,"b":2.19,"g":1.8,"r":1.32}, + {"d":-1.3,"b":2.23,"g":1.82,"r":1.35}, + {"d":-1.26,"b":2.26,"g":1.85,"r":1.37}, + {"d":-1.21,"b":2.29,"g":1.88,"r":1.4}, + {"d":-1.17,"b":2.32,"g":1.91,"r":1.43}, + {"d":-1.12,"b":2.36,"g":1.94,"r":1.45}, + {"d":-1.08,"b":2.39,"g":1.96,"r":1.48}, + {"d":-1.04,"b":2.42,"g":1.99,"r":1.5}, + {"d":-0.99,"b":2.45,"g":2.02,"r":1.53}, + {"d":-0.94,"b":2.48,"g":2.04,"r":1.55}, 
+ {"d":-0.9,"b":2.51,"g":2.07,"r":1.58}, + {"d":-0.86,"b":2.54,"g":2.09,"r":1.61}, + {"d":-0.81,"b":2.57,"g":2.11,"r":1.63}, + {"d":-0.77,"b":2.6,"g":2.14,"r":1.65}, + {"d":-0.72,"b":2.62,"g":2.16,"r":1.67}, + {"d":-0.68,"b":2.65,"g":2.19,"r":1.7}, + {"d":-0.63,"b":2.67,"g":2.21,"r":1.72}, + {"d":-0.59,"b":2.7,"g":2.23,"r":1.74}, + {"d":-0.54,"b":2.72,"g":2.26,"r":1.76}, + {"d":-0.5,"b":2.75,"g":2.28,"r":1.78}, + {"d":-0.45,"b":2.77,"g":2.3,"r":1.8}, + {"d":-0.41,"b":2.79,"g":2.32,"r":1.81}, + {"d":-0.36,"b":2.82,"g":2.34,"r":1.83}, + {"d":-0.32,"b":2.84,"g":2.36,"r":1.85}, + {"d":-0.27,"b":2.86,"g":2.38,"r":1.87}, + {"d":-0.23,"b":2.88,"g":2.39,"r":1.88}, + {"d":-0.18,"b":2.9,"g":2.41,"r":1.9}, + {"d":-0.14,"b":2.92,"g":2.43,"r":1.92}, + {"d":-0.09,"b":2.93,"g":2.44,"r":1.93}], + "spectral_sensitivity" : [ + {"wavelength":382.8,"y":1.82,"m":0,"c":0}, + {"wavelength":387.3,"y":1.88,"m":0,"c":0}, + {"wavelength":390.1,"y":1.93,"m":0,"c":0}, + {"wavelength":390.7,"y":1.99,"m":0,"c":0}, + {"wavelength":393.8,"y":2.04,"m":0,"c":0}, + {"wavelength":395.1,"y":2.12,"m":0,"c":0}, + {"wavelength":396.6,"y":2.18,"m":0,"c":0}, + {"wavelength":397.9,"y":2.18,"m":0,"c":0}, + {"wavelength":398,"y":2.23,"m":0,"c":0}, + {"wavelength":399.7,"y":2.28,"m":0,"c":0}, + {"wavelength":400.1,"y":2.22,"m":0,"c":0}, + {"wavelength":400.2,"y":2.26,"m":0,"c":0}, + {"wavelength":400.2,"y":2.28,"m":0,"c":0}, + {"wavelength":400.7,"y":2.35,"m":0,"c":0}, + {"wavelength":403.5,"y":2.48,"m":0,"c":0}, + {"wavelength":404.8,"y":2.54,"m":0,"c":0}, + {"wavelength":406.2,"y":2.61,"m":0,"c":0}, + {"wavelength":407.5,"y":2.69,"m":0,"c":0}, + {"wavelength":408.6,"y":2.75,"m":0,"c":0}, + {"wavelength":409.5,"y":2.81,"m":0,"c":0}, + {"wavelength":410.5,"y":2.87,"m":0,"c":0}, + {"wavelength":412.5,"y":2.93,"m":0,"c":0}, + {"wavelength":416.2,"y":2.97,"m":0,"c":0}, + {"wavelength":421,"y":2.99,"m":0,"c":0}, + {"wavelength":426.4,"y":2.98,"m":0,"c":0}, + {"wavelength":431.4,"y":2.97,"m":0,"c":0}, + {"wavelength":436.5,"y":2.96,"m":0,"c":0}, + {"wavelength":441.7,"y":2.94,"m":0,"c":0}, + {"wavelength":446.9,"y":2.92,"m":0,"c":0}, + {"wavelength":451.2,"y":2.91,"m":0,"c":0}, + {"wavelength":457.1,"y":2.88,"m":0,"c":0}, + {"wavelength":462.2,"y":2.86,"m":1.48,"c":0}, + {"wavelength":467.6,"y":2.86,"m":1.5,"c":0}, + {"wavelength":472.8,"y":2.85,"m":1.56,"c":0}, + {"wavelength":477.3,"y":2.81,"m":1.62,"c":0}, + {"wavelength":480.7,"y":2.76,"m":1.73,"c":0}, + {"wavelength":483.1,"y":2.71,"m":1.79,"c":0}, + {"wavelength":485.4,"y":2.65,"m":1.85,"c":0}, + {"wavelength":487.7,"y":2.6,"m":1.91,"c":0}, + {"wavelength":490,"y":2.54,"m":2.08,"c":0}, + {"wavelength":492.3,"y":2.49,"m":2.15,"c":0}, + {"wavelength":494.7,"y":2.43,"m":2.2,"c":0}, + {"wavelength":500.4,"y":2.33,"m":2.26,"c":0}, + {"wavelength":500.6,"y":2.26,"m":2.31,"c":0}, + {"wavelength":503.4,"y":2.22,"m":2.31,"c":0}, + {"wavelength":504.4,"y":2.17,"m":2.31,"c":0}, + {"wavelength":505.7,"y":2.12,"m":2.36,"c":0}, + {"wavelength":506.8,"y":2.07,"m":2.36,"c":0}, + {"wavelength":507.8,"y":2.01,"m":2.41,"c":0}, + {"wavelength":510.3,"y":1.94,"m":2.41,"c":0}, + {"wavelength":511.7,"y":1.87,"m":2.41,"c":0}, + {"wavelength":512.9,"y":1.82,"m":2.41,"c":0}, + {"wavelength":514.1,"y":1.76,"m":2.47,"c":0}, + {"wavelength":515.4,"y":1.71,"m":2.47,"c":0}, + {"wavelength":516.6,"y":1.65,"m":2.47,"c":0}, + {"wavelength":517.8,"y":1.6,"m":2.47,"c":0}, + {"wavelength":519,"y":1.54,"m":2.47,"c":0}, + {"wavelength":520.2,"y":1.49,"m":2.52,"c":0}, + {"wavelength":521.4,"y":1.43,"m":2.52,"c":0}, + 
{"wavelength":522.5,"y":1.38,"m":2.52,"c":0}, + {"wavelength":523.6,"y":1.32,"m":2.52,"c":0}, + {"wavelength":524.5,"y":1.27,"m":2.52,"c":0}, + {"wavelength":525.4,"y":1.21,"m":2.58,"c":0}, + {"wavelength":526.3,"y":1.16,"m":2.58,"c":0}, + {"wavelength":527.1,"y":1.11,"m":2.58,"c":0}, + {"wavelength":527.7,"y":1.07,"m":2.58,"c":0}, + {"wavelength":528.7,"y":1,"m":2.58,"c":0}, + {"wavelength":529.7,"y":0.92,"m":2.64,"c":0}, + {"wavelength":530.5,"y":0.87,"m":2.64,"c":0}, + {"wavelength":535.2,"y":0,"m":2.7,"c":0}, + {"wavelength":540.1,"y":0,"m":2.74,"c":0}, + {"wavelength":545.1,"y":0,"m":2.76,"c":0}, + {"wavelength":550.7,"y":0,"m":2.77,"c":1.39}, + {"wavelength":553.8,"y":0,"m":2.77,"c":1.45}, + {"wavelength":555.7,"y":0,"m":2.74,"c":1.51}, + {"wavelength":557.6,"y":0,"m":2.74,"c":1.56}, + {"wavelength":559.9,"y":0,"m":2.7,"c":1.62}, + {"wavelength":560.5,"y":0,"m":2.7,"c":1.69}, + {"wavelength":565.3,"y":0,"m":2.67,"c":1.74}, + {"wavelength":567.2,"y":0,"m":2.63,"c":1.8}, + {"wavelength":569.8,"y":0,"m":2.63,"c":1.86}, + {"wavelength":572.6,"y":0,"m":2.58,"c":1.91}, + {"wavelength":574.6,"y":0,"m":2.52,"c":1.97}, + {"wavelength":576.1,"y":0,"m":2.47,"c":2.02}, + {"wavelength":577.3,"y":0,"m":2.41,"c":2.09}, + {"wavelength":578.5,"y":0,"m":2.36,"c":2.09}, + {"wavelength":579.5,"y":0,"m":2.3,"c":2.2}, + {"wavelength":580.9,"y":0,"m":2.17,"c":2.2}, + {"wavelength":581.1,"y":0,"m":2.22,"c":2.25}, + {"wavelength":582.5,"y":0,"m":2.09,"c":2.25}, + {"wavelength":583.1,"y":0,"m":2.01,"c":2.25}, + {"wavelength":584.4,"y":0,"m":1.93,"c":2.25}, + {"wavelength":585.1,"y":0,"m":1.87,"c":2.32}, + {"wavelength":585.7,"y":0,"m":1.82,"c":2.32}, + {"wavelength":586.3,"y":0,"m":1.76,"c":2.32}, + {"wavelength":586.9,"y":0,"m":1.71,"c":2.32}, + {"wavelength":588.2,"y":0,"m":1.6,"c":2.32}, + {"wavelength":588.9,"y":0,"m":1.54,"c":2.37}, + {"wavelength":589.6,"y":0,"m":1.49,"c":2.37}, + {"wavelength":590.8,"y":0,"m":1.38,"c":2.37}, + {"wavelength":592.2,"y":0,"m":1.27,"c":2.37}, + {"wavelength":592.9,"y":0,"m":1.22,"c":2.37}, + {"wavelength":594.4,"y":0,"m":1.11,"c":2.41}, + {"wavelength":595,"y":0,"m":1.07,"c":2.41}, + {"wavelength":596,"y":0,"m":1,"c":2.41}, + {"wavelength":597.2,"y":0,"m":0.92,"c":2.41}, + {"wavelength":598,"y":0,"m":0.87,"c":2.41}, + {"wavelength":598.8,"y":0,"m":0.81,"c":2.44}, + {"wavelength":601.3,"y":0,"m":0.62,"c":2.44}, + {"wavelength":606.1,"y":0,"m":0,"c":2.49}, + {"wavelength":611.4,"y":0,"m":0,"c":2.54}, + {"wavelength":616.6,"y":0,"m":0,"c":2.57}, + {"wavelength":621.8,"y":0,"m":0,"c":2.61}, + {"wavelength":626.3,"y":0,"m":0,"c":2.67}, + {"wavelength":630.2,"y":0,"m":0,"c":2.73}, + {"wavelength":633.7,"y":0,"m":0,"c":2.79}, + {"wavelength":636.1,"y":0,"m":0,"c":2.84}, + {"wavelength":638.8,"y":0,"m":0,"c":2.9}, + {"wavelength":643.4,"y":0,"m":0,"c":2.93}, + {"wavelength":648.3,"y":0,"m":0,"c":2.94}, + {"wavelength":653.1,"y":0,"m":0,"c":2.95}, + {"wavelength":658.5,"y":0,"m":0,"c":2.97}, + {"wavelength":663.2,"y":0,"m":0,"c":2.96}, + {"wavelength":664.5,"y":0,"m":0,"c":2.9}, + {"wavelength":665.3,"y":0,"m":0,"c":2.85}, + {"wavelength":666.2,"y":0,"m":0,"c":2.79}, + {"wavelength":667,"y":0,"m":0,"c":2.74}, + {"wavelength":667.8,"y":0,"m":0,"c":2.68}, + {"wavelength":668.6,"y":0,"m":0,"c":2.63}, + {"wavelength":669.2,"y":0,"m":0,"c":2.57}, + {"wavelength":669.8,"y":0,"m":0,"c":2.52}, + {"wavelength":670.6,"y":0,"m":0,"c":2.46}, + {"wavelength":671.2,"y":0,"m":0,"c":2.41}, + {"wavelength":671.8,"y":0,"m":0,"c":2.35}, + {"wavelength":672.4,"y":0,"m":0,"c":2.3}, + 
{"wavelength":673,"y":0,"m":0,"c":2.24}, + {"wavelength":673.5,"y":0,"m":0,"c":2.19}, + {"wavelength":674,"y":0,"m":0,"c":2.14}, + {"wavelength":674.6,"y":0,"m":0,"c":2.08}, + {"wavelength":675.5,"y":0,"m":0,"c":2.01}, + {"wavelength":676.3,"y":0,"m":0,"c":1.93}, + {"wavelength":676.8,"y":0,"m":0,"c":1.87}, + {"wavelength":677.3,"y":0,"m":0,"c":1.82}, + {"wavelength":677.9,"y":0,"m":0,"c":1.76}, + {"wavelength":678.5,"y":0,"m":0,"c":1.71}, + {"wavelength":679.1,"y":0,"m":0,"c":1.65}, + {"wavelength":679.6,"y":0,"m":0,"c":1.6}, + {"wavelength":680.1,"y":0,"m":0,"c":1.54}, + {"wavelength":680.7,"y":0,"m":0,"c":1.49}, + {"wavelength":681.3,"y":0,"m":0,"c":1.43}, + {"wavelength":681.9,"y":0,"m":0,"c":1.38}, + {"wavelength":682.5,"y":0,"m":0,"c":1.32}, + {"wavelength":683.2,"y":0,"m":0,"c":1.27}, + {"wavelength":683.8,"y":0,"m":0,"c":1.22}, + {"wavelength":684.6,"y":0,"m":0,"c":1.16}, + {"wavelength":685.3,"y":0,"m":0,"c":1.1}, + {"wavelength":685.9,"y":0,"m":0,"c":1.07}, + {"wavelength":688,"y":0,"m":0,"c":0.95}, + {"wavelength":688.7,"y":0,"m":0,"c":0.87}, + {"wavelength":689.4,"y":0,"m":0,"c":0.81}, + {"wavelength":690.2,"y":0,"m":0,"c":0.76}, + {"wavelength":690.9,"y":0,"m":0,"c":0.7}, + {"wavelength":691.5,"y":0,"m":0,"c":0.67} + ], + "spectral_dye_absorption": [ + {"wavelength":400.25,"y":0.5645,"m":0,"c":0.0003,"dmin":0.6831882116543871}, + {"wavelength":403.52,"y":0.6071,"m":0,"c":0.0004,"dmin":0.6764668453}, + {"wavelength":406.78,"y":0.6497,"m":0,"c":0.0004,"dmin":0.6714668452779639}, + {"wavelength":410.05,"y":0.6919,"m":0,"c":0.0005,"dmin":0.6764668453}, + {"wavelength":413.32,"y":0.7332,"m":0,"c":0.0006,"dmin":0.6831882116543871}, + {"wavelength":416.58,"y":0.773,"m":0,"c":0.0007,"dmin":0.7200267916945747}, + {"wavelength":419.85,"y":0.8111,"m":0,"c":0.0008,"dmin":0.7200267916945748}, + {"wavelength":423.12,"y":0.8468,"m":0,"c":0.001,"dmin":0.7618888144675151}, + {"wavelength":426.38,"y":0.8796,"m":0.0001,"c":0.0012,"dmin":0.797052913596784}, + {"wavelength":429.65,"y":0.9093,"m":0.0001,"c":0.0014,"dmin":0.797052913596785}, + {"wavelength":432.91,"y":0.9353,"m":0.0002,"c":0.0016,"dmin":0.797052913596786}, + {"wavelength":436.18,"y":0.9573,"m":0.0003,"c":0.0019,"dmin":0.8171466845277964}, + {"wavelength":439.45,"y":0.975,"m":0.0005,"c":0.0022,"dmin":0.8171466845277965}, + {"wavelength":442.71,"y":0.9881,"m":0.0008,"c":0.0025,"dmin":0.8204956463496315}, + {"wavelength":445.98,"y":0.9965,"m":0.0013,"c":0.0029,"dmin":0.8204956463496316}, + {"wavelength":449.25,"y":0.9999,"m":0.002,"c":0.0034,"dmin":0.8204956463496317}, + {"wavelength":452.51,"y":0.9984,"m":0.003,"c":0.0039,"dmin":0.8121232417950435}, + {"wavelength":455.78,"y":0.992,"m":0.0045,"c":0.0045,"dmin":0.8121232417950436}, + {"wavelength":459.05,"y":0.9807,"m":0.0067,"c":0.0052,"dmin":0.8121232417950437}, + {"wavelength":462.31,"y":0.9648,"m":0.0098,"c":0.006,"dmin":0.7937039517749497}, + {"wavelength":465.58,"y":0.9445,"m":0.0141,"c":0.0069,"dmin":0.7937039517749498}, + {"wavelength":468.84,"y":0.92,"m":0.0201,"c":0.0079,"dmin":0.7937039517749499}, + {"wavelength":472.11,"y":0.8917,"m":0.028,"c":0.009,"dmin":0.775284661754855}, + {"wavelength":475.38,"y":0.86,"m":0.0386,"c":0.0103,"dmin":0.775284661754856}, + {"wavelength":478.64,"y":0.8254,"m":0.0523,"c":0.0117,"dmin":0.775284661754857}, + {"wavelength":481.91,"y":0.7882,"m":0.0698,"c":0.0133,"dmin":0.7585398526456797}, + {"wavelength":485.18,"y":0.749,"m":0.0919,"c":0.0151,"dmin":0.7585398526456798}, + 
{"wavelength":488.44,"y":0.7082,"m":0.1191,"c":0.0171,"dmin":0.7585398526456799}, + {"wavelength":491.71,"y":0.6664,"m":0.1521,"c":0.0193,"dmin":0.7434695244474213}, + {"wavelength":494.97,"y":0.6239,"m":0.1914,"c":0.0218,"dmin":0.7434695244474214}, + {"wavelength":498.24,"y":0.5812,"m":0.2373,"c":0.0245,"dmin":0.7518419290020094}, + {"wavelength":501.51,"y":0.5388,"m":0.2898,"c":0.0275,"dmin":0.7518419290020095}, + {"wavelength":504.77,"y":0.497,"m":0.3486,"c":0.0309,"dmin":0.7719356999330208}, + {"wavelength":508.04,"y":0.4562,"m":0.4132,"c":0.0346,"dmin":0.7937039517749498}, + {"wavelength":511.31,"y":0.4167,"m":0.4825,"c":0.0387,"dmin":0.7937039517749499}, + {"wavelength":514.57,"y":0.3787,"m":0.555,"c":0.0431,"dmin":0.7585398526456797}, + {"wavelength":517.84,"y":0.3424,"m":0.629,"c":0.048,"dmin":0.7585398526456798}, + {"wavelength":521.11,"y":0.3082,"m":0.7023,"c":0.0534,"dmin":0.7232350971}, + {"wavelength":524.37,"y":0.2759,"m":0.7725,"c":0.0592,"dmin":0.6932350971198928}, + {"wavelength":527.64,"y":0.2459,"m":0.8371,"c":0.0655,"dmin":0.693235097119893}, + {"wavelength":530.9,"y":0.218,"m":0.8937,"c":0.0724,"dmin":0.644675150703281}, + {"wavelength":534.17,"y":0.1923,"m":0.94,"c":0.0799,"dmin":0.644675150703282}, + {"wavelength":537.44,"y":0.1688,"m":0.974,"c":0.088,"dmin":0.644675150703283}, + {"wavelength":540.7,"y":0.1475,"m":0.9942,"c":0.0967,"dmin":0.6162089752176824}, + {"wavelength":543.97,"y":0.1282,"m":0.9999,"c":0.106,"dmin":0.6162089752176825}, + {"wavelength":547.24,"y":0.1109,"m":0.9907,"c":0.1161,"dmin":0.6162089752176826}, + {"wavelength":550.5,"y":0.0954,"m":0.967,"c":0.1269,"dmin":0.6028131279303415}, + {"wavelength":553.77,"y":0.0817,"m":0.9299,"c":0.1384,"dmin":0.6028131279303416}, + {"wavelength":557.04,"y":0.0696,"m":0.8809,"c":0.1507,"dmin":0.6028131279303417}, + {"wavelength":560.3,"y":0.059,"m":0.8222,"c":0.1637,"dmin":0.5726724715338245}, + {"wavelength":563.57,"y":0.0498,"m":0.756,"c":0.1776,"dmin":0.5726724715338246}, + {"wavelength":566.83,"y":0.0418,"m":0.6848,"c":0.1923,"dmin":0.5241125251172135}, + {"wavelength":570.1,"y":0.0349,"m":0.6112,"c":0.2078,"dmin":0.5241125251172136}, + {"wavelength":573.37,"y":0.0291,"m":0.5373,"c":0.2241,"dmin":0.4621567314132618}, + {"wavelength":576.63,"y":0.024,"m":0.4654,"c":0.2413,"dmin":0.4621567314132619}, + {"wavelength":579.9,"y":0.0198,"m":0.3972,"c":0.2593,"dmin":0.37340924313462825}, + {"wavelength":583.17,"y":0.0162,"m":0.3339,"c":0.2781,"dmin":0.37340924313462825}, + {"wavelength":586.43,"y":0.0132,"m":0.2765,"c":0.2977,"dmin":0.3534092431}, + {"wavelength":589.7,"y":0.0107,"m":0.2256,"c":0.3182,"dmin":0.2997320830542531}, + {"wavelength":592.96,"y":0.0086,"m":0.1814,"c":0.3393,"dmin":0.2997320830542532}, + {"wavelength":596.23,"y":0.0069,"m":0.1436,"c":0.3613,"dmin":0.2997320830542533}, + {"wavelength":599.5,"y":0.0055,"m":0.1121,"c":0.3839,"dmin":0.24614869390488947}, + {"wavelength":602.76,"y":0.0044,"m":0.0861,"c":0.4072,"dmin":0.24614869390488947}, + {"wavelength":606.03,"y":0.0035,"m":0.0652,"c":0.431,"dmin":0.2461486939048895}, + {"wavelength":609.3,"y":0.0027,"m":0.0487,"c":0.4555,"dmin":0.20261219022103147}, + {"wavelength":612.56,"y":0.0022,"m":0.0358,"c":0.4804,"dmin":0.20261219022103147}, + {"wavelength":615.83,"y":0.0017,"m":0.0259,"c":0.5057,"dmin":0.20261219022103147}, + {"wavelength":619.1,"y":0.0013,"m":0.0185,"c":0.5314,"dmin":0.19256530475552575}, + {"wavelength":622.36,"y":0.001,"m":0.013,"c":0.5573,"dmin":0.19256530475552577}, + 
{"wavelength":625.63,"y":0.0008,"m":0.009,"c":0.5835,"dmin":0.19256530475552577}, + {"wavelength":628.89,"y":0.0006,"m":0.0061,"c":0.6097,"dmin":0.19256530475552577}, + {"wavelength":632.16,"y":0.0004,"m":0.0041,"c":0.6359,"dmin":0.1925653047555258}, + {"wavelength":635.43,"y":0.0003,"m":0.0027,"c":0.6619,"dmin":0.1925653047555258}, + {"wavelength":638.69,"y":0.0003,"m":0.0018,"c":0.6878,"dmin":0.19256530475552575}, + {"wavelength":641.96,"y":0.0002,"m":0.0011,"c":0.7133,"dmin":0.19256530475552577}, + {"wavelength":645.23,"y":0.0001,"m":0.0007,"c":0.7384,"dmin":0.19256530475552577}, + {"wavelength":648.49,"y":0.0001,"m":0.0005,"c":0.763,"dmin":0.19256530475552577}, + {"wavelength":651.76,"y":0.0001,"m":0.0003,"c":0.7869,"dmin":0.20261219022103144}, + {"wavelength":655.03,"y":0.0001,"m":0.0002,"c":0.81,"dmin":0.20261219022103147}, + {"wavelength":658.29,"y":0,"m":0.0001,"c":0.8323,"dmin":0.20261219022103147}, + {"wavelength":661.56,"y":0,"m":0.0001,"c":0.8536,"dmin":0.20261219022103147}, + {"wavelength":664.82,"y":0,"m":0,"c":0.8738,"dmin":0.21098459477561954}, + {"wavelength":668.09,"y":0,"m":0,"c":0.8928,"dmin":0.21098459477561957}, + {"wavelength":671.36,"y":0,"m":0,"c":0.9105,"dmin":0.21098459477561957}, + {"wavelength":674.62,"y":0,"m":0,"c":0.9268,"dmin":0.2109845947756196}, + {"wavelength":677.89,"y":0,"m":0,"c":0.9417,"dmin":0.2109845947756196}, + {"wavelength":681.16,"y":0,"m":0,"c":0.955,"dmin":0.21935699933020764}, + {"wavelength":684.42,"y":0,"m":0,"c":0.9667,"dmin":0.21935699933020764}, + {"wavelength":687.69,"y":0,"m":0,"c":0.9767,"dmin":0.21935699933020766}, + {"wavelength":690.95,"y":0,"m":0,"c":0.985,"dmin":0.21935699933020766}, + {"wavelength":694.22,"y":0,"m":0,"c":0.9915,"dmin":0.2260549229738781}, + {"wavelength":697.49,"y":0,"m":0,"c":0.9962,"dmin":0.2260549229738782}, + {"wavelength":700.75,"y":0,"m":0,"c":0.999,"dmin":0.22103148024112526} + ] + } + } +} \ No newline at end of file diff --git a/sim_data/portra_400.json b/sim_data/portra_400.json index d34cc13..1bbdabb 100644 --- a/sim_data/portra_400.json +++ b/sim_data/portra_400.json @@ -1,6 +1,6 @@ { "info": { - "name": "Portra 400", + "name": "Portra", "description": "KODAK PROFESSIONAL PORTRA 400 is the world's finest grain high-speed color negative film. At true ISO 400 speed, this film delivers spectacular skin tones plus exceptional color saturation over a wide range of lighting conditions. PORTRA 400 Film is the ideal choice for portrait and fashion photography, as well as for nature, travel and outdoor photography, where the action is fast or the lighting can't be controlled.", "format_mm": 35, "version": "1.0.0" diff --git a/testbench.py b/testbench.py new file mode 100644 index 0000000..a53f1ab --- /dev/null +++ b/testbench.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python3 +import os +import sys +import argparse +import itertools +import subprocess +from multiprocessing import Pool +from functools import partial + +# --- Configuration --- + +# This dictionary maps the desired abbreviation to the full command-line flag. +# This makes it easy to add or remove flags in the future. +ARGS_MAP = { + # 'fd': '--force-d65', + # 'pnc': '--perform-negative-correction', + 'pwb': '--perform-white-balance', + 'pec': '--perform-exposure-correction', + # 'rae': '--raw-auto-exposure', + 'sg': '--simulate-grain', + # 'mg': '--mono-grain' +} + +# --- Worker Function for Multiprocessing --- + +def run_filmcolor_command(job_info, filmcolor_path): + """ + Executes a single filmcolor command. 
+    This function is designed to be called by a multiprocessing Pool.
+    """
+    input_file, datasheet, output_file, flags = job_info
+
+    command = [
+        filmcolor_path,
+        input_file,
+        datasheet,
+        output_file
+    ]
+    command.extend(flags)
+
+    command_str = " ".join(command)
+    print(f"🚀 Starting job: {os.path.basename(output_file)}")
+
+    try:
+        # Using subprocess.run to execute the command
+        # capture_output=True keeps stdout/stderr from cluttering the main display
+        # text=True decodes stdout/stderr as text
+        # check=True will raise a CalledProcessError if the command returns a non-zero exit code
+        result = subprocess.run(
+            command,
+            check=True,
+            capture_output=True,
+            text=True,
+            encoding='utf-8'
+        )
+        return f"✅ SUCCESS: Created {output_file}"
+    except FileNotFoundError:
+        return f"❌ ERROR: filmcolor executable not found at '{filmcolor_path}'"
+    except subprocess.CalledProcessError as e:
+        # This block runs if the command fails (returns non-zero exit code)
+        error_message = (
+            f"❌ FAILURE: Could not process {os.path.basename(input_file)} with {os.path.basename(datasheet)}\n"
+            f"   Command: {command_str}\n"
+            f"   Exit Code: {e.returncode}\n"
+            f"   Stderr: {e.stderr.strip()}"
+        )
+        return error_message
+    except Exception as e:
+        return f"❌ UNEXPECTED ERROR: {e}"
+
+
+# --- Main Script Logic ---
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="A testbench runner for the 'filmcolor' script.",
+        formatter_class=argparse.RawTextHelpFormatter
+    )
+    parser.add_argument(
+        "input_dir",
+        help="The root directory containing subfolders with RAW images (ARW, DNG)."
+    )
+    parser.add_argument(
+        "datasheet_dir",
+        help="The directory containing the film datasheet JSON files."
+    )
+    parser.add_argument(
+        "filmcolor_path",
+        help="The path to the 'filmcolor' executable script."
+    )
+    parser.add_argument(
+        "-j", "--jobs",
+        type=int,
+        default=3,
+        help="Number of parallel jobs to run. (Default: 3)"
+    )
+    args = parser.parse_args()
+
+    # 1. Find all input RAW files
+    raw_files = []
+    print(f"🔎 Scanning for RAW files in '{args.input_dir}'...")
+    for root, _, files in os.walk(args.input_dir):
+        for file in files:
+            if file.lower().endswith(('.arw', '.dng')):
+                raw_files.append(os.path.join(root, file))
+
+    if not raw_files:
+        print("❌ No RAW (.ARW or .DNG) files found. Exiting.")
+        sys.exit(1)
+    print(f"   Found {len(raw_files)} RAW files.")
+
+    # 2. Find all datasheet JSON files
+    datasheet_files = []
+    print(f"🔎 Scanning for JSON files in '{args.datasheet_dir}'...")
+    try:
+        for file in os.listdir(args.datasheet_dir):
+            if file.lower().endswith('.json'):
+                datasheet_files.append(os.path.join(args.datasheet_dir, file))
+    except FileNotFoundError:
+        print(f"❌ Datasheet directory not found at '{args.datasheet_dir}'. Exiting.")
+        sys.exit(1)
+
+    if not datasheet_files:
+        print("❌ No datasheet (.json) files found. Exiting.")
+        sys.exit(1)
+    print(f"   Found {len(datasheet_files)} datasheet files.")
+
+    # 3. Generate all argument combinations
+    arg_abbreviations = list(ARGS_MAP.keys())
+    all_arg_combos = []
+    # Loop from 0 to len(abbreviations) to get combinations of all lengths
+    for i in range(len(arg_abbreviations) + 1):
+        for combo in itertools.combinations(arg_abbreviations, i):
+            all_arg_combos.append(sorted(list(combo))) # Sort for consistent naming
+
+    # 4. 
Create the full list of jobs to run + jobs_to_run = [] + for raw_file_path in raw_files: + input_dir = os.path.dirname(raw_file_path) + input_filename = os.path.basename(raw_file_path) + + for datasheet_path in datasheet_files: + datasheet_name = os.path.splitext(os.path.basename(datasheet_path))[0] + + for arg_combo_abbrs in all_arg_combos: + # Build the output filename + arg_suffix = "-".join(arg_combo_abbrs) + # Handle the case with no arguments to avoid a trailing hyphen + if arg_suffix: + output_name = f"{input_filename}-{datasheet_name}-{arg_suffix}.jpg" + else: + output_name = f"{input_filename}-{datasheet_name}.jpg" + + output_path = os.path.join(input_dir, output_name) + + # Get the full flags from the abbreviations + flags = [ARGS_MAP[abbr] for abbr in arg_combo_abbrs] + ['--perform-negative-correction'] # always include this flag + + # Add the complete job description to our list + jobs_to_run.append((raw_file_path, datasheet_path, output_path, flags)) + + total_jobs = len(jobs_to_run) + print(f"\n✨ Generated {total_jobs} total jobs to run.") + if total_jobs == 0: + print("Nothing to do. Exiting.") + sys.exit(0) + + # Ask for confirmation before starting a large number of jobs + try: + confirm = input(f"Proceed with running {total_jobs} jobs using {args.jobs} parallel processes? (y/N): ") + if confirm.lower() != 'y': + print("Aborted by user.") + sys.exit(0) + except KeyboardInterrupt: + print("\nAborted by user.") + sys.exit(0) + + + # 5. Run the jobs in a multiprocessing pool + print("\n--- Starting Testbench ---\n") + # `partial` is used to "pre-fill" the filmcolor_path argument of our worker function + worker_func = partial(run_filmcolor_command, filmcolor_path=args.filmcolor_path) + + with Pool(processes=args.jobs) as pool: + # imap_unordered is great for this: it yields results as they complete, + # providing real-time feedback without waiting for all jobs to finish. 
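+        # A rough sequential equivalent is sketched below (commented out,
+        # hypothetical usage) for debugging one job at a time without the pool:
+        #
+        #   for job in jobs_to_run:
+        #       print(run_filmcolor_command(job, filmcolor_path=args.filmcolor_path))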
+ for i, result in enumerate(pool.imap_unordered(worker_func, jobs_to_run), 1): + print(f"[{i}/{total_jobs}] {result}") + + print("\n--- Testbench Finished ---") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/uv.lock b/uv.lock index ba1f7ce..4fe8683 100644 --- a/uv.lock +++ b/uv.lock @@ -1,6 +1,12 @@ version = 1 revision = 1 requires-python = ">=3.13" +resolution-markers = [ + "sys_platform == 'darwin'", + "platform_machine == 'aarch64' and sys_platform == 'linux'", + "(platform_machine != 'aarch64' and sys_platform == 'linux') or sys_platform == 'win32'", + "sys_platform != 'darwin' and sys_platform != 'linux' and sys_platform != 'win32'", +] [[package]] name = "anyio" @@ -225,6 +231,46 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e6/75/49e5bfe642f71f272236b5b2d2691cf915a7283cc0ceda56357b61daa538/comm-0.2.2-py3-none-any.whl", hash = "sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3", size = 7180 }, ] +[[package]] +name = "contourpy" +version = "1.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/54/eb9bfc647b19f2009dd5c7f5ec51c4e6ca831725f1aea7a993034f483147/contourpy-1.3.2.tar.gz", hash = "sha256:b6945942715a034c671b7fc54f9588126b0b8bf23db2696e3ca8328f3ff0ab54", size = 13466130 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/61/5673f7e364b31e4e7ef6f61a4b5121c5f170f941895912f773d95270f3a2/contourpy-1.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:de39db2604ae755316cb5967728f4bea92685884b1e767b7c24e983ef5f771cb", size = 271630 }, + { url = "https://files.pythonhosted.org/packages/ff/66/a40badddd1223822c95798c55292844b7e871e50f6bfd9f158cb25e0bd39/contourpy-1.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3f9e896f447c5c8618f1edb2bafa9a4030f22a575ec418ad70611450720b5b08", size = 255670 }, + { url = "https://files.pythonhosted.org/packages/1e/c7/cf9fdee8200805c9bc3b148f49cb9482a4e3ea2719e772602a425c9b09f8/contourpy-1.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71e2bd4a1c4188f5c2b8d274da78faab884b59df20df63c34f74aa1813c4427c", size = 306694 }, + { url = "https://files.pythonhosted.org/packages/dd/e7/ccb9bec80e1ba121efbffad7f38021021cda5be87532ec16fd96533bb2e0/contourpy-1.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de425af81b6cea33101ae95ece1f696af39446db9682a0b56daaa48cfc29f38f", size = 345986 }, + { url = "https://files.pythonhosted.org/packages/dc/49/ca13bb2da90391fa4219fdb23b078d6065ada886658ac7818e5441448b78/contourpy-1.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:977e98a0e0480d3fe292246417239d2d45435904afd6d7332d8455981c408b85", size = 318060 }, + { url = "https://files.pythonhosted.org/packages/c8/65/5245ce8c548a8422236c13ffcdcdada6a2a812c361e9e0c70548bb40b661/contourpy-1.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:434f0adf84911c924519d2b08fc10491dd282b20bdd3fa8f60fd816ea0b48841", size = 322747 }, + { url = "https://files.pythonhosted.org/packages/72/30/669b8eb48e0a01c660ead3752a25b44fdb2e5ebc13a55782f639170772f9/contourpy-1.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c66c4906cdbc50e9cba65978823e6e00b45682eb09adbb78c9775b74eb222422", size = 1308895 }, + { url = 
"https://files.pythonhosted.org/packages/05/5a/b569f4250decee6e8d54498be7bdf29021a4c256e77fe8138c8319ef8eb3/contourpy-1.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8b7fc0cd78ba2f4695fd0a6ad81a19e7e3ab825c31b577f384aa9d7817dc3bef", size = 1379098 }, + { url = "https://files.pythonhosted.org/packages/19/ba/b227c3886d120e60e41b28740ac3617b2f2b971b9f601c835661194579f1/contourpy-1.3.2-cp313-cp313-win32.whl", hash = "sha256:15ce6ab60957ca74cff444fe66d9045c1fd3e92c8936894ebd1f3eef2fff075f", size = 178535 }, + { url = "https://files.pythonhosted.org/packages/12/6e/2fed56cd47ca739b43e892707ae9a13790a486a3173be063681ca67d2262/contourpy-1.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e1578f7eafce927b168752ed7e22646dad6cd9bca673c60bff55889fa236ebf9", size = 223096 }, + { url = "https://files.pythonhosted.org/packages/54/4c/e76fe2a03014a7c767d79ea35c86a747e9325537a8b7627e0e5b3ba266b4/contourpy-1.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0475b1f6604896bc7c53bb070e355e9321e1bc0d381735421a2d2068ec56531f", size = 285090 }, + { url = "https://files.pythonhosted.org/packages/7b/e2/5aba47debd55d668e00baf9651b721e7733975dc9fc27264a62b0dd26eb8/contourpy-1.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c85bb486e9be652314bb5b9e2e3b0d1b2e643d5eec4992c0fbe8ac71775da739", size = 268643 }, + { url = "https://files.pythonhosted.org/packages/a1/37/cd45f1f051fe6230f751cc5cdd2728bb3a203f5619510ef11e732109593c/contourpy-1.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:745b57db7758f3ffc05a10254edd3182a2a83402a89c00957a8e8a22f5582823", size = 310443 }, + { url = "https://files.pythonhosted.org/packages/8b/a2/36ea6140c306c9ff6dd38e3bcec80b3b018474ef4d17eb68ceecd26675f4/contourpy-1.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:970e9173dbd7eba9b4e01aab19215a48ee5dd3f43cef736eebde064a171f89a5", size = 349865 }, + { url = "https://files.pythonhosted.org/packages/95/b7/2fc76bc539693180488f7b6cc518da7acbbb9e3b931fd9280504128bf956/contourpy-1.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6c4639a9c22230276b7bffb6a850dfc8258a2521305e1faefe804d006b2e532", size = 321162 }, + { url = "https://files.pythonhosted.org/packages/f4/10/76d4f778458b0aa83f96e59d65ece72a060bacb20cfbee46cf6cd5ceba41/contourpy-1.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc829960f34ba36aad4302e78eabf3ef16a3a100863f0d4eeddf30e8a485a03b", size = 327355 }, + { url = "https://files.pythonhosted.org/packages/43/a3/10cf483ea683f9f8ab096c24bad3cce20e0d1dd9a4baa0e2093c1c962d9d/contourpy-1.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d32530b534e986374fc19eaa77fcb87e8a99e5431499949b828312bdcd20ac52", size = 1307935 }, + { url = "https://files.pythonhosted.org/packages/78/73/69dd9a024444489e22d86108e7b913f3528f56cfc312b5c5727a44188471/contourpy-1.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e298e7e70cf4eb179cc1077be1c725b5fd131ebc81181bf0c03525c8abc297fd", size = 1372168 }, + { url = "https://files.pythonhosted.org/packages/0f/1b/96d586ccf1b1a9d2004dd519b25fbf104a11589abfd05484ff12199cca21/contourpy-1.3.2-cp313-cp313t-win32.whl", hash = "sha256:d0e589ae0d55204991450bb5c23f571c64fe43adaa53f93fc902a84c96f52fe1", size = 189550 }, + { url = "https://files.pythonhosted.org/packages/b0/e6/6000d0094e8a5e32ad62591c8609e269febb6e4db83a1c75ff8868b42731/contourpy-1.3.2-cp313-cp313t-win_amd64.whl", hash = 
"sha256:78e9253c3de756b3f6a5174d024c4835acd59eb3f8e2ca13e775dbffe1558f69", size = 238214 }, +] + +[[package]] +name = "cycler" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a9/95/a3dbbb5028f35eafb79008e7522a75244477d2838f38cbb722248dabc2a8/cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c", size = 7615 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30", size = 8321 }, +] + [[package]] name = "debugpy" version = "1.8.14" @@ -274,6 +320,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/90/2b/0817a2b257fe88725c25589d89aec060581aabf668707a8d03b2e9e0cb2a/fastjsonschema-2.21.1-py3-none-any.whl", hash = "sha256:c9e5b7e908310918cf494a434eeb31384dd84a98b57a30bcb1f535015b554667", size = 23924 }, ] +[[package]] +name = "filelock" +version = "3.18.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/10/c23352565a6544bdc5353e0b15fc1c563352101f30e24bf500207a54df9a/filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2", size = 18075 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/36/2a115987e2d8c300a974597416d9de88f2444426de9571f4b59b2cca3acc/filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de", size = 16215 }, +] + [[package]] name = "filmsim" version = "0.1.0" @@ -283,11 +338,15 @@ dependencies = [ { name = "imageio" }, { name = "jupyter" }, { name = "jupyterlab" }, + { name = "matplotlib" }, { name = "numpy" }, + { name = "opencv-python" }, { name = "pillow" }, { name = "pyfftw" }, { name = "rawpy" }, + { name = "scikit-image" }, { name = "scipy" }, + { name = "torch" }, { name = "warp-lang" }, ] @@ -297,14 +356,35 @@ requires-dist = [ { name = "imageio", specifier = ">=2.37.0" }, { name = "jupyter", specifier = ">=1.1.1" }, { name = "jupyterlab", specifier = ">=4.4.3" }, + { name = "matplotlib", specifier = ">=3.10.3" }, { name = "numpy", specifier = ">=2.2.6" }, + { name = "opencv-python", specifier = ">=4.11.0.86" }, { name = "pillow", specifier = ">=11.2.1" }, { name = "pyfftw", specifier = ">=0.15.0" }, { name = "rawpy", specifier = ">=0.25.0" }, + { name = "scikit-image", specifier = ">=0.25.2" }, { name = "scipy", specifier = ">=1.15.3" }, + { name = "torch", specifier = ">=2.7.1", index = "https://download.pytorch.org/whl/cu128" }, { name = "warp-lang", specifier = ">=1.7.2" }, ] +[[package]] +name = "fonttools" +version = "4.58.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b6/a9/3319c6ae07fd9dde51064ddc6d82a2b707efad8ed407d700a01091121bbc/fonttools-4.58.2.tar.gz", hash = "sha256:4b491ddbfd50b856e84b0648b5f7941af918f6d32f938f18e62b58426a8d50e2", size = 3524285 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ac/01/29f81970a508408af20b434ff5136cd1c7ef92198957eb8ddadfbb9ef177/fonttools-4.58.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:829048ef29dbefec35d95cc6811014720371c95bdc6ceb0afd2f8e407c41697c", size = 2732398 }, + { url = 
"https://files.pythonhosted.org/packages/0c/f1/095f2338359333adb2f1c51b8b2ad94bf9a2fa17e5fcbdf8a7b8e3672d2d/fonttools-4.58.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:64998c5993431e45b474ed5f579f18555f45309dd1cf8008b594d2fe0a94be59", size = 2306390 }, + { url = "https://files.pythonhosted.org/packages/bf/d4/9eba134c7666a26668c28945355cd86e5d57828b6b8d952a5489fe45d7e2/fonttools-4.58.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b887a1cf9fbcb920980460ee4a489c8aba7e81341f6cdaeefa08c0ab6529591c", size = 4795100 }, + { url = "https://files.pythonhosted.org/packages/2a/34/345f153a24c1340daa62340c3be2d1e5ee6c1ee57e13f6d15613209e688b/fonttools-4.58.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27d74b9f6970cefbcda33609a3bee1618e5e57176c8b972134c4e22461b9c791", size = 4864585 }, + { url = "https://files.pythonhosted.org/packages/01/5f/091979a25c9a6c4ba064716cfdfe9431f78ed6ffba4bd05ae01eee3532e9/fonttools-4.58.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ec26784610056a770e15a60f9920cee26ae10d44d1e43271ea652dadf4e7a236", size = 4866191 }, + { url = "https://files.pythonhosted.org/packages/9d/09/3944d0ece4a39560918cba37c2e0453a5f826b665a6db0b43abbd9dbe7e1/fonttools-4.58.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ed0a71d57dd427c0fb89febd08cac9b925284d2a8888e982a6c04714b82698d7", size = 5003867 }, + { url = "https://files.pythonhosted.org/packages/68/97/190b8f9ba22f8b7d07df2faa9fd7087b453776d0705d3cb5b0cbd89b8ef0/fonttools-4.58.2-cp313-cp313-win32.whl", hash = "sha256:994e362b01460aa863ef0cb41a29880bc1a498c546952df465deff7abf75587a", size = 2175688 }, + { url = "https://files.pythonhosted.org/packages/94/ea/0e6d4a39528dbb6e0f908c2ad219975be0a506ed440fddf5453b90f76981/fonttools-4.58.2-cp313-cp313-win_amd64.whl", hash = "sha256:f95dec862d7c395f2d4efe0535d9bdaf1e3811e51b86432fa2a77e73f8195756", size = 2226464 }, + { url = "https://files.pythonhosted.org/packages/e8/e5/c1cb8ebabb80be76d4d28995da9416816653f8f572920ab5e3d2e3ac8285/fonttools-4.58.2-py3-none-any.whl", hash = "sha256:84f4b0bcfa046254a65ee7117094b4907e22dc98097a220ef108030eb3c15596", size = 1114597 }, +] + [[package]] name = "fqdn" version = "1.5.1" @@ -314,6 +394,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cf/58/8acf1b3e91c58313ce5cb67df61001fc9dcd21be4fadb76c1a2d540e09ed/fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014", size = 9121 }, ] +[[package]] +name = "fsspec" +version = "2025.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/f7/27f15d41f0ed38e8fcc488584b57e902b331da7f7c6dcda53721b15838fc/fsspec-2025.5.1.tar.gz", hash = "sha256:2e55e47a540b91843b755e83ded97c6e897fa0942b11490113f09e9c443c2475", size = 303033 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bb/61/78c7b3851add1481b048b5fdc29067397a1784e2910592bc81bb3f608635/fsspec-2025.5.1-py3-none-any.whl", hash = "sha256:24d3a2e663d5fc735ab256263c4075f374a174c3410c0b25e5bd1970bceaa462", size = 199052 }, +] + [[package]] name = "h11" version = "0.16.0" @@ -653,7 +742,7 @@ dependencies = [ { name = "overrides" }, { name = "packaging" }, { name = "prometheus-client" }, - { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = "pywinpty", marker = "(os_name == 'nt' and platform_machine != 'aarch64' and sys_platform == 'linux') or (os_name == 'nt' and sys_platform != 'darwin' and 
sys_platform != 'linux')" }, { name = "pyzmq" }, { name = "send2trash" }, { name = "terminado" }, @@ -671,7 +760,7 @@ name = "jupyter-server-terminals" version = "0.5.3" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = "pywinpty", marker = "(os_name == 'nt' and platform_machine != 'aarch64' and sys_platform == 'linux') or (os_name == 'nt' and sys_platform != 'darwin' and sys_platform != 'linux')" }, { name = "terminado" }, ] sdist = { url = "https://files.pythonhosted.org/packages/fc/d5/562469734f476159e99a55426d697cbf8e7eb5efe89fb0e0b4f83a3d3459/jupyter_server_terminals-0.5.3.tar.gz", hash = "sha256:5ae0295167220e9ace0edcfdb212afd2b01ee8d179fe6f23c899590e9b8a5269", size = 31430 } @@ -739,6 +828,54 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/43/6a/ca128561b22b60bd5a0c4ea26649e68c8556b82bc70a0c396eebc977fe86/jupyterlab_widgets-3.0.15-py3-none-any.whl", hash = "sha256:d59023d7d7ef71400d51e6fee9a88867f6e65e10a4201605d2d7f3e8f012a31c", size = 216571 }, ] +[[package]] +name = "kiwisolver" +version = "1.4.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/82/59/7c91426a8ac292e1cdd53a63b6d9439abd573c875c3f92c146767dd33faf/kiwisolver-1.4.8.tar.gz", hash = "sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e", size = 97538 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/b3/e62464a652f4f8cd9006e13d07abad844a47df1e6537f73ddfbf1bc997ec/kiwisolver-1.4.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1c8ceb754339793c24aee1c9fb2485b5b1f5bb1c2c214ff13368431e51fc9a09", size = 124156 }, + { url = "https://files.pythonhosted.org/packages/8d/2d/f13d06998b546a2ad4f48607a146e045bbe48030774de29f90bdc573df15/kiwisolver-1.4.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a62808ac74b5e55a04a408cda6156f986cefbcf0ada13572696b507cc92fa1", size = 66555 }, + { url = "https://files.pythonhosted.org/packages/59/e3/b8bd14b0a54998a9fd1e8da591c60998dc003618cb19a3f94cb233ec1511/kiwisolver-1.4.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:68269e60ee4929893aad82666821aaacbd455284124817af45c11e50a4b42e3c", size = 65071 }, + { url = "https://files.pythonhosted.org/packages/f0/1c/6c86f6d85ffe4d0ce04228d976f00674f1df5dc893bf2dd4f1928748f187/kiwisolver-1.4.8-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34d142fba9c464bc3bbfeff15c96eab0e7310343d6aefb62a79d51421fcc5f1b", size = 1378053 }, + { url = "https://files.pythonhosted.org/packages/4e/b9/1c6e9f6dcb103ac5cf87cb695845f5fa71379021500153566d8a8a9fc291/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc373e0eef45b59197de815b1b28ef89ae3955e7722cc9710fb91cd77b7f47", size = 1472278 }, + { url = "https://files.pythonhosted.org/packages/ee/81/aca1eb176de671f8bda479b11acdc42c132b61a2ac861c883907dde6debb/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77e6f57a20b9bd4e1e2cedda4d0b986ebd0216236f0106e55c28aea3d3d69b16", size = 1478139 }, + { url = "https://files.pythonhosted.org/packages/49/f4/e081522473671c97b2687d380e9e4c26f748a86363ce5af48b4a28e48d06/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08e77738ed7538f036cd1170cbed942ef749137b1311fa2bbe2a7fda2f6bf3cc", size = 1413517 }, + { url = 
"https://files.pythonhosted.org/packages/8f/e9/6a7d025d8da8c4931522922cd706105aa32b3291d1add8c5427cdcd66e63/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5ce1e481a74b44dd5e92ff03ea0cb371ae7a0268318e202be06c8f04f4f1246", size = 1474952 }, + { url = "https://files.pythonhosted.org/packages/82/13/13fa685ae167bee5d94b415991c4fc7bb0a1b6ebea6e753a87044b209678/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fc2ace710ba7c1dfd1a3b42530b62b9ceed115f19a1656adefce7b1782a37794", size = 2269132 }, + { url = "https://files.pythonhosted.org/packages/ef/92/bb7c9395489b99a6cb41d502d3686bac692586db2045adc19e45ee64ed23/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3452046c37c7692bd52b0e752b87954ef86ee2224e624ef7ce6cb21e8c41cc1b", size = 2425997 }, + { url = "https://files.pythonhosted.org/packages/ed/12/87f0e9271e2b63d35d0d8524954145837dd1a6c15b62a2d8c1ebe0f182b4/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7e9a60b50fe8b2ec6f448fe8d81b07e40141bfced7f896309df271a0b92f80f3", size = 2376060 }, + { url = "https://files.pythonhosted.org/packages/02/6e/c8af39288edbce8bf0fa35dee427b082758a4b71e9c91ef18fa667782138/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:918139571133f366e8362fa4a297aeba86c7816b7ecf0bc79168080e2bd79957", size = 2520471 }, + { url = "https://files.pythonhosted.org/packages/13/78/df381bc7b26e535c91469f77f16adcd073beb3e2dd25042efd064af82323/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e063ef9f89885a1d68dd8b2e18f5ead48653176d10a0e324e3b0030e3a69adeb", size = 2338793 }, + { url = "https://files.pythonhosted.org/packages/d0/dc/c1abe38c37c071d0fc71c9a474fd0b9ede05d42f5a458d584619cfd2371a/kiwisolver-1.4.8-cp313-cp313-win_amd64.whl", hash = "sha256:a17b7c4f5b2c51bb68ed379defd608a03954a1845dfed7cc0117f1cc8a9b7fd2", size = 71855 }, + { url = "https://files.pythonhosted.org/packages/a0/b6/21529d595b126ac298fdd90b705d87d4c5693de60023e0efcb4f387ed99e/kiwisolver-1.4.8-cp313-cp313-win_arm64.whl", hash = "sha256:3cd3bc628b25f74aedc6d374d5babf0166a92ff1317f46267f12d2ed54bc1d30", size = 65430 }, + { url = "https://files.pythonhosted.org/packages/34/bd/b89380b7298e3af9b39f49334e3e2a4af0e04819789f04b43d560516c0c8/kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:370fd2df41660ed4e26b8c9d6bbcad668fbe2560462cba151a721d49e5b6628c", size = 126294 }, + { url = "https://files.pythonhosted.org/packages/83/41/5857dc72e5e4148eaac5aa76e0703e594e4465f8ab7ec0fc60e3a9bb8fea/kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:84a2f830d42707de1d191b9490ac186bf7997a9495d4e9072210a1296345f7dc", size = 67736 }, + { url = "https://files.pythonhosted.org/packages/e1/d1/be059b8db56ac270489fb0b3297fd1e53d195ba76e9bbb30e5401fa6b759/kiwisolver-1.4.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7a3ad337add5148cf51ce0b55642dc551c0b9d6248458a757f98796ca7348712", size = 66194 }, + { url = "https://files.pythonhosted.org/packages/e1/83/4b73975f149819eb7dcf9299ed467eba068ecb16439a98990dcb12e63fdd/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7506488470f41169b86d8c9aeff587293f530a23a23a49d6bc64dab66bedc71e", size = 1465942 }, + { url = "https://files.pythonhosted.org/packages/c7/2c/30a5cdde5102958e602c07466bce058b9d7cb48734aa7a4327261ac8e002/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2f0121b07b356a22fb0414cec4666bbe36fd6d0d759db3d37228f496ed67c880", size = 1595341 }, + { url = "https://files.pythonhosted.org/packages/ff/9b/1e71db1c000385aa069704f5990574b8244cce854ecd83119c19e83c9586/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6d6bd87df62c27d4185de7c511c6248040afae67028a8a22012b010bc7ad062", size = 1598455 }, + { url = "https://files.pythonhosted.org/packages/85/92/c8fec52ddf06231b31cbb779af77e99b8253cd96bd135250b9498144c78b/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:291331973c64bb9cce50bbe871fb2e675c4331dab4f31abe89f175ad7679a4d7", size = 1522138 }, + { url = "https://files.pythonhosted.org/packages/0b/51/9eb7e2cd07a15d8bdd976f6190c0164f92ce1904e5c0c79198c4972926b7/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:893f5525bb92d3d735878ec00f781b2de998333659507d29ea4466208df37bed", size = 1582857 }, + { url = "https://files.pythonhosted.org/packages/0f/95/c5a00387a5405e68ba32cc64af65ce881a39b98d73cc394b24143bebc5b8/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b47a465040146981dc9db8647981b8cb96366fbc8d452b031e4f8fdffec3f26d", size = 2293129 }, + { url = "https://files.pythonhosted.org/packages/44/83/eeb7af7d706b8347548313fa3a3a15931f404533cc54fe01f39e830dd231/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:99cea8b9dd34ff80c521aef46a1dddb0dcc0283cf18bde6d756f1e6f31772165", size = 2421538 }, + { url = "https://files.pythonhosted.org/packages/05/f9/27e94c1b3eb29e6933b6986ffc5fa1177d2cd1f0c8efc5f02c91c9ac61de/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:151dffc4865e5fe6dafce5480fab84f950d14566c480c08a53c663a0020504b6", size = 2390661 }, + { url = "https://files.pythonhosted.org/packages/d9/d4/3c9735faa36ac591a4afcc2980d2691000506050b7a7e80bcfe44048daa7/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:577facaa411c10421314598b50413aa1ebcf5126f704f1e5d72d7e4e9f020d90", size = 2546710 }, + { url = "https://files.pythonhosted.org/packages/4c/fa/be89a49c640930180657482a74970cdcf6f7072c8d2471e1babe17a222dc/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:be4816dc51c8a471749d664161b434912eee82f2ea66bd7628bd14583a833e85", size = 2349213 }, +] + +[[package]] +name = "lazy-loader" +version = "0.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6f/6b/c875b30a1ba490860c93da4cabf479e03f584eba06fe5963f6f6644653d8/lazy_loader-0.4.tar.gz", hash = "sha256:47c75182589b91a4e1a85a136c074285a5ad4d9f39c63e0d7fb76391c4574cd1", size = 15431 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/60/d497a310bde3f01cb805196ac61b7ad6dc5dcf8dce66634dc34364b20b4f/lazy_loader-0.4-py3-none-any.whl", hash = "sha256:342aa8e14d543a154047afb4ba8ef17f5563baad3fc610d7b15b213b0f119efc", size = 12097 }, +] + [[package]] name = "markupsafe" version = "3.0.2" @@ -767,6 +904,37 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, ] +[[package]] +name = "matplotlib" +version = "3.10.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "contourpy" }, + { name = "cycler" }, + { name = 
"fonttools" }, + { name = "kiwisolver" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "pyparsing" }, + { name = "python-dateutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/26/91/d49359a21893183ed2a5b6c76bec40e0b1dcbf8ca148f864d134897cfc75/matplotlib-3.10.3.tar.gz", hash = "sha256:2f82d2c5bb7ae93aaaa4cd42aca65d76ce6376f83304fa3a630b569aca274df0", size = 34799811 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/c1/23cfb566a74c696a3b338d8955c549900d18fe2b898b6e94d682ca21e7c2/matplotlib-3.10.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9f2efccc8dcf2b86fc4ee849eea5dcaecedd0773b30f47980dc0cbeabf26ec84", size = 8180318 }, + { url = "https://files.pythonhosted.org/packages/6c/0c/02f1c3b66b30da9ee343c343acbb6251bef5b01d34fad732446eaadcd108/matplotlib-3.10.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3ddbba06a6c126e3301c3d272a99dcbe7f6c24c14024e80307ff03791a5f294e", size = 8051132 }, + { url = "https://files.pythonhosted.org/packages/b4/ab/8db1a5ac9b3a7352fb914133001dae889f9fcecb3146541be46bed41339c/matplotlib-3.10.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:748302b33ae9326995b238f606e9ed840bf5886ebafcb233775d946aa8107a15", size = 8457633 }, + { url = "https://files.pythonhosted.org/packages/f5/64/41c4367bcaecbc03ef0d2a3ecee58a7065d0a36ae1aa817fe573a2da66d4/matplotlib-3.10.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a80fcccbef63302c0efd78042ea3c2436104c5b1a4d3ae20f864593696364ac7", size = 8601031 }, + { url = "https://files.pythonhosted.org/packages/12/6f/6cc79e9e5ab89d13ed64da28898e40fe5b105a9ab9c98f83abd24e46d7d7/matplotlib-3.10.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:55e46cbfe1f8586adb34f7587c3e4f7dedc59d5226719faf6cb54fc24f2fd52d", size = 9406988 }, + { url = "https://files.pythonhosted.org/packages/b1/0f/eed564407bd4d935ffabf561ed31099ed609e19287409a27b6d336848653/matplotlib-3.10.3-cp313-cp313-win_amd64.whl", hash = "sha256:151d89cb8d33cb23345cd12490c76fd5d18a56581a16d950b48c6ff19bb2ab93", size = 8068034 }, + { url = "https://files.pythonhosted.org/packages/3e/e5/2f14791ff69b12b09e9975e1d116d9578ac684460860ce542c2588cb7a1c/matplotlib-3.10.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c26dd9834e74d164d06433dc7be5d75a1e9890b926b3e57e74fa446e1a62c3e2", size = 8218223 }, + { url = "https://files.pythonhosted.org/packages/5c/08/30a94afd828b6e02d0a52cae4a29d6e9ccfcf4c8b56cc28b021d3588873e/matplotlib-3.10.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:24853dad5b8c84c8c2390fc31ce4858b6df504156893292ce8092d190ef8151d", size = 8094985 }, + { url = "https://files.pythonhosted.org/packages/89/44/f3bc6b53066c889d7a1a3ea8094c13af6a667c5ca6220ec60ecceec2dabe/matplotlib-3.10.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68f7878214d369d7d4215e2a9075fef743be38fa401d32e6020bab2dfabaa566", size = 8483109 }, + { url = "https://files.pythonhosted.org/packages/ba/c7/473bc559beec08ebee9f86ca77a844b65747e1a6c2691e8c92e40b9f42a8/matplotlib-3.10.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6929fc618cb6db9cb75086f73b3219bbb25920cb24cee2ea7a12b04971a4158", size = 8618082 }, + { url = "https://files.pythonhosted.org/packages/d8/e9/6ce8edd264c8819e37bbed8172e0ccdc7107fe86999b76ab5752276357a4/matplotlib-3.10.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6c7818292a5cc372a2dc4c795e5c356942eb8350b98ef913f7fda51fe175ac5d", size = 9413699 }, + { url = 
"https://files.pythonhosted.org/packages/1b/92/9a45c91089c3cf690b5badd4be81e392ff086ccca8a1d4e3a08463d8a966/matplotlib-3.10.3-cp313-cp313t-win_amd64.whl", hash = "sha256:4f23ffe95c5667ef8a2b56eea9b53db7f43910fa4a2d5472ae0f72b64deab4d5", size = 8139044 }, +] + [[package]] name = "matplotlib-inline" version = "0.1.7" @@ -788,6 +956,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/01/4d/23c4e4f09da849e127e9f123241946c23c1e30f45a88366879e064211815/mistune-3.1.3-py3-none-any.whl", hash = "sha256:1a32314113cff28aa6432e99e522677c8587fd83e3d51c29b82a52409c842bd9", size = 53410 }, ] +[[package]] +name = "mpmath" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198 }, +] + [[package]] name = "nbclient" version = "0.10.2" @@ -852,6 +1029,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195 }, ] +[[package]] +name = "networkx" +version = "3.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/4f/ccdb8ad3a38e583f214547fd2f7ff1fc160c43a75af88e6aec213404b96a/networkx-3.5.tar.gz", hash = "sha256:d4c6f9cf81f52d69230866796b82afbccdec3db7ae4fbd1b65ea750feed50037", size = 2471065 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/8d/776adee7bbf76365fdd7f2552710282c79a4ead5d2a46408c9043a2b70ba/networkx-3.5-py3-none-any.whl", hash = "sha256:0030d386a9a06dee3565298b4a734b68589749a544acbb6c412dc9e2489ec6ec", size = 2034406 }, +] + [[package]] name = "notebook" version = "7.4.3" @@ -908,6 +1094,149 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/67/0e/35082d13c09c02c011cf21570543d202ad929d961c02a147493cb0c2bdf5/numpy-2.2.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06", size = 12771374 }, ] +[[package]] +name = "nvidia-cublas-cu12" +version = "12.8.3.14" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/82/df/4b01f10069e23c641f116c62fc31e31e8dc361a153175d81561d15c8143b/nvidia_cublas_cu12-12.8.3.14-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:3f0e05e7293598cf61933258b73e66a160c27d59c4422670bf0b79348c04be44", size = 609620630 }, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.8.57" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/6f/3683ecf4e38931971946777d231c2df00dd5c1c4c2c914c42ad8f9f4dca6/nvidia_cuda_cupti_cu12-12.8.57-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8e0b2eb847de260739bee4a3f66fac31378f4ff49538ff527a38a01a9a39f950", size = 10237547 }, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.8.61" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/d4/22/32029d4583f7b19cfe75c84399cbcfd23f2aaf41c66fc8db4da460104fff/nvidia_cuda_nvrtc_cu12-12.8.61-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:a0fa9c2a21583105550ebd871bd76e2037205d56f33f128e69f6d2a55e0af9ed", size = 88024585 }, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.8.57" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/16/f6/0e1ef31f4753a44084310ba1a7f0abaf977ccd810a604035abb43421c057/nvidia_cuda_runtime_cu12-12.8.57-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:75342e28567340b7428ce79a5d6bb6ca5ff9d07b69e7ce00d2c7b4dc23eff0be", size = 954762 }, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "9.7.1.26" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or sys_platform == 'win32'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/dc/dc825c4b1c83b538e207e34f48f86063c88deaa35d46c651c7c181364ba2/nvidia_cudnn_cu12-9.7.1.26-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:6d011159a158f3cfc47bf851aea79e31bcff60d530b70ef70474c84cac484d07", size = 726851421 }, +] + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.3.3.41" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or sys_platform == 'win32'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/ac/26/b53c493c38dccb1f1a42e1a21dc12cba2a77fbe36c652f7726d9ec4aba28/nvidia_cufft_cu12-11.3.3.41-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:da650080ab79fcdf7a4b06aa1b460e99860646b176a43f6208099bdc17836b6a", size = 193118795 }, +] + +[[package]] +name = "nvidia-cufile-cu12" +version = "1.13.0.11" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/9c/1f3264d0a84c8a031487fb7f59780fc78fa6f1c97776233956780e3dc3ac/nvidia_cufile_cu12-1.13.0.11-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:483f434c541806936b98366f6d33caef5440572de8ddf38d453213729da3e7d4", size = 1197801 }, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.9.55" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/fc/7be5d0082507269bb04ac07cc614c84b78749efb96e8cf4100a8a1178e98/nvidia_curand_cu12-10.3.9.55-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:8387d974240c91f6a60b761b83d4b2f9b938b7e0b9617bae0f0dafe4f5c36b86", size = 63618038 }, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.7.2.55" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or sys_platform == 'win32'" }, + { name = "nvidia-cusparse-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or sys_platform == 'win32'" }, + { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or sys_platform == 'win32'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/08/953675873a136d96bb12f93b49ba045d1107bc94d2551c52b12fa6c7dec3/nvidia_cusolver_cu12-11.7.2.55-py3-none-manylinux_2_27_x86_64.whl", hash = 
"sha256:4d1354102f1e922cee9db51920dba9e2559877cf6ff5ad03a00d853adafb191b", size = 260373342 }, +] + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.5.7.53" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or sys_platform == 'win32'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/ab/31e8149c66213b846c082a3b41b1365b831f41191f9f40c6ddbc8a7d550e/nvidia_cusparse_cu12-12.5.7.53-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c1b61eb8c85257ea07e9354606b26397612627fdcd327bfd91ccf6155e7c86d", size = 292064180 }, +] + +[[package]] +name = "nvidia-cusparselt-cu12" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/9a/72ef35b399b0e183bc2e8f6f558036922d453c4d8237dab26c666a04244b/nvidia_cusparselt_cu12-0.6.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:e5c8a26c36445dd2e6812f1177978a24e2d37cacce7e090f297a688d1ec44f46", size = 156785796 }, +] + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.26.2" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/67/ca/f42388aed0fddd64ade7493dbba36e1f534d4e6fdbdd355c6a90030ae028/nvidia_nccl_cu12-2.26.2-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:694cf3879a206553cc9d7dbda76b13efaf610fdb70a50cba303de1b0d1530ac6", size = 201319755 }, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.8.61" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/f8/9d85593582bd99b8d7c65634d2304780aefade049b2b94d96e44084be90b/nvidia_nvjitlink_cu12-12.8.61-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:45fd79f2ae20bd67e8bc411055939049873bfd8fac70ff13bd4865e0b9bdab17", size = 39243473 }, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.8.55" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/cd/0e8c51b2ae3a58f054f2e7fe91b82d201abfb30167f2431e9bd92d532f42/nvidia_nvtx_cu12-12.8.55-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2dd0780f1a55c21d8e06a743de5bd95653de630decfff40621dbde78cc307102", size = 89896 }, +] + +[[package]] +name = "opencv-python" +version = "4.11.0.86" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/17/06/68c27a523103dad5837dc5b87e71285280c4f098c60e4fe8a8db6486ab09/opencv-python-4.11.0.86.tar.gz", hash = "sha256:03d60ccae62304860d232272e4a4fda93c39d595780cb40b161b310244b736a4", size = 95171956 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/4d/53b30a2a3ac1f75f65a59eb29cf2ee7207ce64867db47036ad61743d5a23/opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_arm64.whl", hash = "sha256:432f67c223f1dc2824f5e73cdfcd9db0efc8710647d4e813012195dc9122a52a", size = 37326322 }, + { url = "https://files.pythonhosted.org/packages/3b/84/0a67490741867eacdfa37bc18df96e08a9d579583b419010d7f3da8ff503/opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_x86_64.whl", hash = "sha256:9d05ef13d23fe97f575153558653e2d6e87103995d54e6a35db3f282fe1f9c66", size = 56723197 }, + { url = 
"https://files.pythonhosted.org/packages/f3/bd/29c126788da65c1fb2b5fb621b7fed0ed5f9122aa22a0868c5e2c15c6d23/opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b92ae2c8852208817e6776ba1ea0d6b1e0a1b5431e971a2a0ddd2a8cc398202", size = 42230439 }, + { url = "https://files.pythonhosted.org/packages/2c/8b/90eb44a40476fa0e71e05a0283947cfd74a5d36121a11d926ad6f3193cc4/opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b02611523803495003bd87362db3e1d2a0454a6a63025dc6658a9830570aa0d", size = 62986597 }, + { url = "https://files.pythonhosted.org/packages/fb/d7/1d5941a9dde095468b288d989ff6539dd69cd429dbf1b9e839013d21b6f0/opencv_python-4.11.0.86-cp37-abi3-win32.whl", hash = "sha256:810549cb2a4aedaa84ad9a1c92fbfdfc14090e2749cedf2c1589ad8359aa169b", size = 29384337 }, + { url = "https://files.pythonhosted.org/packages/a4/7d/f1c30a92854540bf789e9cd5dde7ef49bbe63f855b85a2e6b3db8135c591/opencv_python-4.11.0.86-cp37-abi3-win_amd64.whl", hash = "sha256:085ad9b77c18853ea66283e98affefe2de8cc4c1f43eda4c100cf9b2721142ec", size = 39488044 }, +] + [[package]] name = "overrides" version = "7.7.0" @@ -1085,6 +1414,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, ] +[[package]] +name = "pyparsing" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/22/f1129e69d94ffff626bdb5c835506b3a5b4f3d070f17ea295e12c2c6f60f/pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be", size = 1088608 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/e7/df2285f3d08fee213f2d041540fa4fc9ca6c2d44cf36d3a035bf2a8d2bcc/pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf", size = 111120 }, +] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -1272,6 +1610,30 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b6/97/5a4b59697111c89477d20ba8a44df9ca16b41e737fa569d5ae8bff99e650/rpds_py-0.25.1-cp313-cp313t-win_amd64.whl", hash = "sha256:401ca1c4a20cc0510d3435d89c069fe0a9ae2ee6495135ac46bdd49ec0495763", size = 232218 }, ] +[[package]] +name = "scikit-image" +version = "0.25.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "imageio" }, + { name = "lazy-loader" }, + { name = "networkx" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "scipy" }, + { name = "tifffile" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c7/a8/3c0f256012b93dd2cb6fda9245e9f4bff7dc0486880b248005f15ea2255e/scikit_image-0.25.2.tar.gz", hash = "sha256:e5a37e6cd4d0c018a7a55b9d601357e3382826d3888c10d0213fc63bff977dde", size = 22693594 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/7c/9814dd1c637f7a0e44342985a76f95a55dd04be60154247679fd96c7169f/scikit_image-0.25.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7efa888130f6c548ec0439b1a7ed7295bc10105458a421e9bf739b457730b6da", size = 13921841 }, + { url = "https://files.pythonhosted.org/packages/84/06/66a2e7661d6f526740c309e9717d3bd07b473661d5cdddef4dd978edab25/scikit_image-0.25.2-cp313-cp313-macosx_12_0_arm64.whl", hash = 
"sha256:dd8011efe69c3641920614d550f5505f83658fe33581e49bed86feab43a180fc", size = 13196862 }, + { url = "https://files.pythonhosted.org/packages/4e/63/3368902ed79305f74c2ca8c297dfeb4307269cbe6402412668e322837143/scikit_image-0.25.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28182a9d3e2ce3c2e251383bdda68f8d88d9fff1a3ebe1eb61206595c9773341", size = 14117785 }, + { url = "https://files.pythonhosted.org/packages/cd/9b/c3da56a145f52cd61a68b8465d6a29d9503bc45bc993bb45e84371c97d94/scikit_image-0.25.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8abd3c805ce6944b941cfed0406d88faeb19bab3ed3d4b50187af55cf24d147", size = 14977119 }, + { url = "https://files.pythonhosted.org/packages/8a/97/5fcf332e1753831abb99a2525180d3fb0d70918d461ebda9873f66dcc12f/scikit_image-0.25.2-cp313-cp313-win_amd64.whl", hash = "sha256:64785a8acefee460ec49a354706db0b09d1f325674107d7fa3eadb663fb56d6f", size = 12885116 }, + { url = "https://files.pythonhosted.org/packages/10/cc/75e9f17e3670b5ed93c32456fda823333c6279b144cd93e2c03aa06aa472/scikit_image-0.25.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:330d061bd107d12f8d68f1d611ae27b3b813b8cdb0300a71d07b1379178dd4cd", size = 13862801 }, +] + [[package]] name = "scipy" version = "1.15.3" @@ -1360,13 +1722,25 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521 }, ] +[[package]] +name = "sympy" +version = "1.14.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mpmath" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353 }, +] + [[package]] name = "terminado" version = "0.18.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "ptyprocess", marker = "os_name != 'nt'" }, - { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = "pywinpty", marker = "(os_name == 'nt' and platform_machine != 'aarch64' and sys_platform == 'linux') or (os_name == 'nt' and sys_platform != 'darwin' and sys_platform != 'linux')" }, { name = "tornado" }, ] sdist = { url = "https://files.pythonhosted.org/packages/8a/11/965c6fd8e5cc254f1fe142d547387da17a8ebfd75a3455f637c663fb38a0/terminado-0.18.1.tar.gz", hash = "sha256:de09f2c4b85de4765f7714688fff57d3e75bad1f909b589fde880460c753fd2e", size = 32701 } @@ -1374,6 +1748,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6a/9e/2064975477fdc887e47ad42157e214526dcad8f317a948dee17e1659a62f/terminado-0.18.1-py3-none-any.whl", hash = "sha256:a4468e1b37bb318f8a86514f65814e1afc977cf29b3992a4500d9dd305dcceb0", size = 14154 }, ] +[[package]] +name = "tifffile" +version = "2025.6.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/33/cc/deed7dd69d4029adba8e95214f8bf65fca8bc6b8426e27d056e1de624206/tifffile-2025.6.1.tar.gz", hash = 
"sha256:63cff7cf7305c26e3f3451c0b05fd95a09252beef4f1663227d4b70cb75c5fdb", size = 369769 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/77/7f7dfcf2d847c1c1c63a2d4157c480eb4c74e4aa56e844008795ff01f86d/tifffile-2025.6.1-py3-none-any.whl", hash = "sha256:ff7163f1aaea519b769a2ac77c43be69e7d83e5b5d5d6a676497399de50535e5", size = 230624 }, +] + [[package]] name = "tinycss2" version = "1.4.0" @@ -1386,6 +1772,43 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e6/34/ebdc18bae6aa14fbee1a08b63c015c72b64868ff7dae68808ab500c492e2/tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289", size = 26610 }, ] +[[package]] +name = "torch" +version = "2.7.1+cu128" +source = { registry = "https://download.pytorch.org/whl/cu128" } +dependencies = [ + { name = "filelock" }, + { name = "fsspec" }, + { name = "jinja2" }, + { name = "networkx" }, + { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cufile-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "setuptools" }, + { name = "sympy" }, + { name = "triton", marker = "sys_platform == 'linux'" }, + { name = "typing-extensions" }, +] +wheels = [ + { url = "https://download.pytorch.org/whl/cu128/torch-2.7.1%2Bcu128-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:d56d29a6ad7758ba5173cc2b0c51c93e126e2b0a918e874101dc66545283967f" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.7.1%2Bcu128-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:9560425f9ea1af1791507e8ca70d5b9ecf62fed7ca226a95fcd58d0eb2cca78f" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.7.1%2Bcu128-cp313-cp313-win_amd64.whl", hash = "sha256:500ad5b670483f62d4052e41948a3fb19e8c8de65b99f8d418d879cbb15a82d6" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.7.1%2Bcu128-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:f112465fdf42eb1297c6dddda1a8b7f411914428b704e1b8a47870c52e290909" }, + { url = "https://download.pytorch.org/whl/cu128/torch-2.7.1%2Bcu128-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:c355db49c218ada70321d5c5c9bb3077312738b99113c8f3723ef596b554a7b9" }, + { url = 
"https://download.pytorch.org/whl/cu128/torch-2.7.1%2Bcu128-cp313-cp313t-win_amd64.whl", hash = "sha256:e27e5f7e74179fb5d814a0412e5026e4b50c9e0081e9050bc4c28c992a276eb1" }, +] + [[package]] name = "tornado" version = "6.5.1" @@ -1414,6 +1837,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359 }, ] +[[package]] +name = "triton" +version = "3.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "setuptools", marker = "sys_platform == 'linux' or sys_platform == 'win32'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/74/1f/dfb531f90a2d367d914adfee771babbd3f1a5b26c3f5fbc458dee21daa78/triton-3.3.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b89d846b5a4198317fec27a5d3a609ea96b6d557ff44b56c23176546023c4240", size = 155673035 }, + { url = "https://files.pythonhosted.org/packages/28/71/bd20ffcb7a64c753dc2463489a61bf69d531f308e390ad06390268c4ea04/triton-3.3.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3198adb9d78b77818a5388bff89fa72ff36f9da0bc689db2f0a651a67ce6a42", size = 155735832 }, +] + [[package]] name = "types-python-dateutil" version = "2.9.0.20250516" diff --git a/wb.py b/wb.py new file mode 100644 index 0000000..58aab0d --- /dev/null +++ b/wb.py @@ -0,0 +1,242 @@ +import torch +import numpy as np +import imageio.v2 as imageio +import time +import os + +# --- Configuration & Constants --- +# These are the same as the NumPy version but will be used on PyTorch tensors. +DEFAULT_T_THRESHOLD = 0.1321 +DEFAULT_MU_STEP = 0.0312 +SCALE_FACTOR = 255.0 +DEFAULT_A_THRESHOLD = 0.8 / SCALE_FACTOR +DEFAULT_B_THRESHOLD = 0.15 / SCALE_FACTOR +DEFAULT_MAX_ITERATIONS = 60 + +# --- PyTorch Core Algorithm Functions --- + +def rgb_to_yuv_torch(image_tensor: torch.Tensor, matrix_torch: torch.Tensor) -> torch.Tensor: + """ + Converts an RGB image tensor to the paper's YUV color space using PyTorch. + + Args: + image_tensor (torch.Tensor): A (H, W, 3) tensor on a target device. + matrix_torch (torch.Tensor): The 3x3 conversion matrix on the same device. + + Returns: + torch.Tensor: An (H, W, 3) YUV tensor on the same device. + """ + return torch.matmul(image_tensor, matrix_torch) + +def k_function_torch(error: float, a: float, b: float) -> float: + """ + Implements the non-linear error weighting function K(x) from Eq. 16. + This function remains on the CPU as it operates on a single scalar value. + """ + abs_error = abs(error) + sign = np.sign(error) + + if abs_error >= a: + return 2.0 * sign + elif abs_error >= b: + return 1.0 * sign + else: + return 0.0 + +def huo_awb_core_torch(image_tensor: torch.Tensor, + t_threshold: float, + mu: float, + a: float, + b: float, + max_iter: int, + device: torch.device) -> torch.Tensor: + """ + Performs the core iterative AWB algorithm using PyTorch tensors on a specified device. + + Args: + image_tensor (torch.Tensor): Input image as a (H, W, 3) float32 tensor on the target device. + (other params): Algorithm configuration constants. + device (torch.device): The device (e.g., 'cuda' or 'cpu') to run on. + + Returns: + torch.Tensor: A (3,) tensor containing the final calculated [R, G, B] gains. 
+    """
+    # Create the YUV conversion matrix and gains tensor on the target device
+    yuv_matrix = torch.tensor([
+        [0.299, 0.587, 0.114],
+        [-0.299, -0.587, 0.886],
+        [0.701, -0.587, -0.114]
+    ], dtype=torch.float32, device=device).T
+
+    gains = torch.tensor([1.0, 1.0, 1.0], dtype=torch.float32, device=device)
+
+    print(f"Starting iterative AWB on device: '{device.type}'...")
+    for i in range(max_iter):
+        # 1. Apply current gains to the image (all on GPU/device)
+        balanced_image = torch.clamp(image_tensor * gains, 0.0, 1.0)
+
+        # 2. Convert to YUV (on GPU/device)
+        yuv_image = rgb_to_yuv_torch(balanced_image, yuv_matrix)
+        Y, U, V = yuv_image.unbind(dim=-1)
+
+        # 3. Identify gray points (on GPU/device)
+        # Luminance mask to exclude overly dark or bright pixels
+        luminance_mask = (Y > 0.1) & (Y < 0.95)
+
+        if not torch.any(luminance_mask):
+            print(f"Iteration {i+1}: No pixels in luminance range. Stopping.")
+            break
+
+        Y_masked = Y[luminance_mask]
+        U_masked = U[luminance_mask]
+        V_masked = V[luminance_mask]
+
+        # Gray-point criterion from Eq. 10: (|U| + |V|) / Y < T
+        gray_mask_indices = (torch.abs(U_masked) + torch.abs(V_masked)) / Y_masked < t_threshold
+
+        gray_points_U = U_masked[gray_mask_indices]
+
+        num_gray_points = gray_points_U.shape[0]
+        if num_gray_points < 50:  # Require a healthy sample of gray points on large images
+            print(f"Iteration {i+1}: Not enough gray points found ({num_gray_points}). Stopping.")
+            break
+
+        # 4. Calculate average chrominance (reduction on GPU/device)
+        u_mean = torch.mean(gray_points_U)
+        v_mean = torch.mean(V_masked[gray_mask_indices])
+
+        # Bring the scalar results back to CPU for control flow
+        u_mean_cpu = u_mean.item()
+        v_mean_cpu = v_mean.item()
+
+        # Check for convergence
+        if abs(u_mean_cpu) < b and abs(v_mean_cpu) < b:
+            print(f"Iteration {i+1}: Converged. u_mean={u_mean_cpu:.4f}, v_mean={v_mean_cpu:.4f}")
+            break
+
+        # 5. Determine adjustment (logic on CPU, gain update on GPU/device)
+        if abs(u_mean_cpu) > abs(v_mean_cpu):
+            error = -u_mean_cpu
+            adjustment = mu * k_function_torch(error, a, b)
+            gains[2] += adjustment
+            print(f"Iter {i+1}: Adjusting B-gain. u_mean={u_mean_cpu:.4f}, v_mean={v_mean_cpu:.4f}, B-adj={adjustment:.4f}")
+        else:
+            error = -v_mean_cpu
+            adjustment = mu * k_function_torch(error, a, b)
+            gains[0] += adjustment
+            print(f"Iter {i+1}: Adjusting R-gain. u_mean={u_mean_cpu:.4f}, v_mean={v_mean_cpu:.4f}, R-adj={adjustment:.4f}")
+
+    print(f"Final gains: R={gains[0].item():.4f}, G={gains[1].item():.4f}, B={gains[2].item():.4f}")
+    return gains
+
+# --- Main Public Function ---
+
+def apply_huo_awb_torch(image_path: str, output_path: str, **kwargs):
+    """
+    Loads a high-resolution 16-bit TIFF, applies the Huo et al. AWB algorithm
+    using PyTorch for high performance, and saves the result.
+
+    Args:
+        image_path (str): Path to the input 16-bit TIFF image.
+        output_path (str): Path to save the white-balanced 16-bit TIFF image.
+        **kwargs: Optional algorithm parameters.
+    """
+    start_time = time.perf_counter()
+
+    # 1. Select device. The CUDA/MPS branch is left commented out for now,
+    #    so the algorithm runs on the CPU; uncomment to enable accelerators.
+    # if torch.cuda.is_available():
+    #     device = torch.device('cuda')
+    # elif torch.backends.mps.is_available():  # For Apple Silicon
+    #     device = torch.device('mps')
+    # else:
+    device = torch.device('cpu')
+    print(f"Using device: {device}")
+
+    # 2. Load Image with imageio (on CPU)
+    print(f"Loading image from: {image_path}")
+    try:
+        image_np = imageio.imread(image_path)
+    except FileNotFoundError:
+        print(f"Error: The file '{image_path}' was not found.")
+        return
+
+    load_time = time.perf_counter()
+    print(f"Image loaded in {load_time - start_time:.2f} seconds.")
+
+    # 3. Pre-process and Move to Device
+    # Normalize to float32 and convert to PyTorch tensor
+    image_float_np = image_np.astype(np.float32) / 65535.0
+    # Move the large image tensor to the selected device
+    image_tensor = torch.from_numpy(image_float_np).to(device)
+
+    transfer_time = time.perf_counter()
+    print(f"Data transferred to {device.type} in {transfer_time - load_time:.2f} seconds.")
+
+    # 4. Run the core algorithm on the device
+    params = {
+        't_threshold': kwargs.get('t_threshold', DEFAULT_T_THRESHOLD),
+        'mu': kwargs.get('mu', DEFAULT_MU_STEP),
+        'a': kwargs.get('a', DEFAULT_A_THRESHOLD),
+        'b': kwargs.get('b', DEFAULT_B_THRESHOLD),
+        'max_iter': kwargs.get('max_iter', DEFAULT_MAX_ITERATIONS),
+    }
+    gains = huo_awb_core_torch(image_tensor, device=device, **params)
+
+    process_time = time.perf_counter()
+    print(f"AWB processing finished in {process_time - transfer_time:.2f} seconds.")
+
+    # 5. Apply final gains, move back to CPU, and save
+    corrected_image_tensor = torch.clamp(image_tensor * gains, 0.0, 1.0)
+
+    # Move tensor back to CPU for conversion to NumPy
+    corrected_image_np = corrected_image_tensor.cpu().numpy()
+
+    # Convert back to 16-bit integer for saving
+    corrected_image_uint16 = (corrected_image_np * 65535).astype(np.uint16)
+
+    print(f"Saving corrected image to: {output_path}")
+    imageio.imwrite(output_path, corrected_image_uint16)
+
+    end_time = time.perf_counter()
+    print(f"Image saved. Total time: {end_time - start_time:.2f} seconds.")
+
+
+# --- Example Usage ---
+
+if __name__ == '__main__':
+    # Create a dummy 50MP, 16-bit TIFF with a bluish cast
+    # 50MP is approx. 8660 x 5773 pixels
+    h, w = 5773, 8660
+    print(f"Creating a sample {h*w/1e6:.1f}MP 16-bit TIFF image with a bluish cast...")
+
+    # A gray gradient (create a smaller version and resize to save memory/time)
+    small_w = w // 10
+    gray_base_small = np.linspace(0.2, 0.8, small_w, dtype=np.float32)
+    gray_image_small = np.tile(gray_base_small, (h // 10, 1))
+
+    # Use PyTorch to resize efficiently if possible, otherwise fall back to NumPy
+    try:
+        import torch.nn.functional as F
+        gray_image = F.interpolate(
+            torch.from_numpy(gray_image_small)[None, None, ...],
+            size=(h, w),
+            mode='bilinear',
+            align_corners=False
+        )[0, 0, ...].numpy()
+    except ImportError:  # ModuleNotFoundError is a subclass of ImportError
+        print("Resizing with a simpler method as torch.nn.functional is not available.")
+        gray_image = np.tile(np.linspace(0.2, 0.8, w, dtype=np.float32), (h, 1))
+
+    image_float = np.stack([gray_image, gray_image, gray_image], axis=-1)
+
+    # Apply a bluish cast (decrease R, increase B)
+    blue_cast = np.array([0.85, 1.0, 1.15], dtype=np.float32)
+    image_float_cast = np.clip(image_float * blue_cast, 0, 1)
+
+    image_uint16_cast = (image_float_cast * 65535).astype(np.uint16)
+
+    # Write the synthetic sample to disk so it can actually be used as input
+    sample_filename = "sample_bluish_cast.tiff"
+    imageio.imwrite(sample_filename, image_uint16_cast)
+
+    input_filename = "/home/dubey/projects/filmsim/test_images/v1.3output/filmscan/04_portra_400_border_v3colorxyz.tiff"
+    output_filename = "/home/dubey/projects/filmsim/test_images/v1.3output/filmscan/04_portra_400_border_v3colorxyz_corrected.tiff"
+
+    # Run the white balance algorithm on the test scan, falling back to the
+    # synthetic sample generated above if the scan is not present
+    if not os.path.exists(input_filename):
+        input_filename = sample_filename
+        output_filename = "sample_bluish_cast_corrected.tiff"
+
+    apply_huo_awb_torch(input_filename, output_filename)
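
A minimal usage sketch for wb.py's public entry point, with the tunable thresholds passed as keyword arguments; the file paths here are placeholders, not files from this repository:

    from wb import apply_huo_awb_torch

    # Placeholder paths; any 16-bit RGB TIFF works.
    apply_huo_awb_torch(
        "scan_in.tiff",
        "scan_out.tiff",
        t_threshold=0.13,  # gray-point criterion T (Eq. 10)
        mu=0.03,           # per-iteration gain step
        max_iter=40,       # iteration cap
    )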
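
With the default thresholds (a = 0.8/255 ≈ 0.0031, b = 0.15/255 ≈ 0.0006), K(x) from Eq. 16 is a three-level step function: |error| >= a yields ±2, b <= |error| < a yields ±1, and anything smaller yields 0, so each iteration moves the chosen gain by 2*mu, mu, or not at all. A quick sanity check of those bands, assuming wb.py is importable as a module:

    from wb import k_function_torch, DEFAULT_A_THRESHOLD, DEFAULT_B_THRESHOLD

    a, b = DEFAULT_A_THRESHOLD, DEFAULT_B_THRESHOLD
    assert k_function_torch(0.01, a, b) == 2.0     # far from neutral: large step
    assert k_function_torch(-0.001, a, b) == -1.0  # mildly off: small step
    assert k_function_torch(0.0002, a, b) == 0.0   # inside the dead band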
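
apply_huo_awb_torch currently pins the device to the CPU, with the CUDA/MPS branch commented out. A sketch of that selection as a standalone helper; pick_device is a hypothetical name, not defined anywhere in this patch:

    import torch

    def pick_device() -> torch.device:
        # Prefer CUDA, then Apple Silicon MPS, then CPU -- mirrors the
        # commented-out branch in apply_huo_awb_torch.
        if torch.cuda.is_available():
            return torch.device("cuda")
        if torch.backends.mps.is_available():
            return torch.device("mps")
        return torch.device("cpu")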