Commit V1.0
10
.gitattributes
vendored
Normal file
@@ -0,0 +1,10 @@
*.jpg filter=lfs diff=lfs merge=lfs -binary
*.jpeg filter=lfs diff=lfs merge=lfs -binary
*.ARW filter=lfs diff=lfs merge=lfs -binary
*.DNG filter=lfs diff=lfs merge=lfs -binary
*.TIFF filter=lfs diff=lfs merge=lfs -binary
*.tiff filter=lfs diff=lfs merge=lfs -binary
*.tif filter=lfs diff=lfs merge=lfs -binary
*.TIF filter=lfs diff=lfs merge=lfs -binary
*.pdf filter=lfs diff=lfs merge=lfs -binary
*.JPG filter=lfs diff=lfs merge=lfs -binary
13
.gitignore
vendored
Normal file
@@ -0,0 +1,13 @@
# Python-generated files
__pycache__/
*.py[oc]
build/
dist/
wheels/
*.egg-info

# Virtual environments
.venv
tools/
*.tiff
*.pp3
1
.python-version
Normal file
@@ -0,0 +1 @@
3.13
162
Makefile
Normal file
@@ -0,0 +1,162 @@
|
||||
# Makefile for image processing pipeline
|
||||
|
||||
# --- Configuration ---
|
||||
# Executables (assuming they are in the current directory or your PATH)
|
||||
FILMCOLOR_EXE := ./filmcolor
|
||||
FILMSCAN_EXE := ./filmscan
|
||||
FILMGRAIN_EXE := ./filmgrain
|
||||
|
||||
# Fixed Paths for this project
|
||||
RAW_DIR := ./test_images/RAW
|
||||
SIM_DATA_FILE := ./sim_data/portra_400.json
|
||||
OUTPUT_BASE_DIR := ./test_images/v1.0output
|
||||
|
||||
# Output subdirectory names (used to construct paths)
|
||||
FILMCOLOR_SUBDIR := filmcolor
|
||||
FILMSCAN_SUBDIR := filmscan
|
||||
FILMGRAIN_RGB_SUBDIR := filmgrainrgb
|
||||
FILMGRAIN_MONO_SUBDIR := filmgrainmono
|
||||
|
||||
# --- Helper: Define all output directories ---
|
||||
# These are the actual directory paths
|
||||
DIR_FILMCOLOR := $(OUTPUT_BASE_DIR)/$(FILMCOLOR_SUBDIR)
|
||||
DIR_FILMSCAN := $(OUTPUT_BASE_DIR)/$(FILMSCAN_SUBDIR)
|
||||
DIR_FILMGRAIN_RGB := $(OUTPUT_BASE_DIR)/$(FILMGRAIN_RGB_SUBDIR)
|
||||
DIR_FILMGRAIN_MONO:= $(OUTPUT_BASE_DIR)/$(FILMGRAIN_MONO_SUBDIR)
|
||||
|
||||
ALL_OUTPUT_DIRS := $(DIR_FILMCOLOR) $(DIR_FILMSCAN) $(DIR_FILMGRAIN_RGB) $(DIR_FILMGRAIN_MONO)
|
||||
|
||||
# --- Input Handling for single DNG processing ---
|
||||
# INPUT_DNG_PATH is expected for targets like 'full_process'
|
||||
# It's not used directly by 'run_all_test_process' but by the targets it calls if you invoke them manually.
|
||||
|
||||
# Derive BASENAME if INPUT_DNG_PATH is provided (for single image processing)
|
||||
# This block is evaluated if INPUT_DNG_PATH is set when make is invoked.
|
||||
ifdef INPUT_DNG_PATH
|
||||
# Check if the input DNG file exists (only if INPUT_DNG_PATH is provided)
|
||||
ifeq ($(wildcard $(INPUT_DNG_PATH)),)
|
||||
$(error Input DNG file not found: $(INPUT_DNG_PATH))
|
||||
endif
|
||||
FILENAME_WITH_EXT_SINGLE := $(notdir $(INPUT_DNG_PATH))
|
||||
BASENAME_SINGLE := $(basename $(FILENAME_WITH_EXT_SINGLE)) # e.g., "09" from "09.DNG"
|
||||
|
||||
# Define specific output files for the single INPUT_DNG_PATH
|
||||
# These are used by 'full_process' target when INPUT_DNG_PATH is specified
|
||||
SPECIFIC_FILMCOLOR_OUT := $(DIR_FILMCOLOR)/$(BASENAME_SINGLE).tiff
|
||||
SPECIFIC_FILMSCAN_OUT := $(DIR_FILMSCAN)/$(BASENAME_SINGLE).tiff
|
||||
SPECIFIC_FILMGRAIN_RGB_OUT := $(DIR_FILMGRAIN_RGB)/$(BASENAME_SINGLE).tiff
|
||||
SPECIFIC_FILMGRAIN_MONO_OUT := $(DIR_FILMGRAIN_MONO)/$(BASENAME_SINGLE).tiff
|
||||
endif
|
||||
|
||||
# --- Batch Processing: Find all DNGs and define their targets ---
|
||||
# Find all .DNG and .dng files in the RAW_DIR
|
||||
DNG_FILES_IN_RAW := $(wildcard $(RAW_DIR)/*.DNG) $(wildcard $(RAW_DIR)/*.dng)
|
||||
|
||||
# Extract unique basenames from these DNG files (e.g., "09", "another_image")
|
||||
# $(notdir path/file.ext) -> file.ext
|
||||
# $(basename file.ext) -> file (or file.part if original was file.part.ext)
|
||||
# $(sort ... ) also removes duplicates
|
||||
ALL_BASENAMES := $(sort $(foreach dng_file,$(DNG_FILES_IN_RAW),$(basename $(notdir $(dng_file)))))
|
||||
|
||||
# Generate lists of all final output files for the run_all_test_process target
|
||||
ALL_FINAL_RGB_OUTPUTS := $(foreach bn,$(ALL_BASENAMES),$(DIR_FILMGRAIN_RGB)/$(bn).tiff)
|
||||
ALL_FINAL_MONO_OUTPUTS := $(foreach bn,$(ALL_BASENAMES),$(DIR_FILMGRAIN_MONO)/$(bn).tiff)
|
||||
TARGETS_FOR_RUN_ALL := $(ALL_FINAL_RGB_OUTPUTS) $(ALL_FINAL_MONO_OUTPUTS)
|
||||
|
||||
|
||||
# --- Targets ---
|
||||
.DEFAULT_GOAL := help
|
||||
.PHONY: all full_process run_all_test_process create_dirs help
|
||||
.SECONDEXPANSION: # Allow use of $$(@F) etc. in static pattern rules if needed, though not strictly used here
|
||||
|
||||
# Target to create all necessary output directories
|
||||
# This is a prerequisite for the first processing step.
|
||||
create_dirs:
|
||||
@echo "Creating output directories if they don't exist..."
|
||||
@mkdir -p $(ALL_OUTPUT_DIRS)
|
||||
@echo "Output directories ensured: $(ALL_OUTPUT_DIRS)"
|
||||
|
||||
# --- Static Pattern Rules for image processing steps ---
|
||||
# These rules define how to build .tiff files in output directories from .DNG/.dng files in RAW_DIR
|
||||
# The '%' is a wildcard that matches the basename of the file.
|
||||
|
||||
# 1. Filmcolor (handles both .DNG and .dng inputs)
|
||||
$(DIR_FILMCOLOR)/%.tiff: $(RAW_DIR)/%.DNG $(SIM_DATA_FILE) | create_dirs # order-only prerequisite so the phony dir target does not force rebuilds
|
||||
@echo "--- [1. Filmcolor] ---"
|
||||
@echo " Input DNG: $<"
|
||||
@echo " Sim Data: $(SIM_DATA_FILE)"
|
||||
@echo " Output: $@"
|
||||
$(FILMCOLOR_EXE) "$<" "$(SIM_DATA_FILE)" "$@"
|
||||
|
||||
$(DIR_FILMCOLOR)/%.tiff: $(RAW_DIR)/%.dng $(SIM_DATA_FILE) | create_dirs # order-only prerequisite so the phony dir target does not force rebuilds
|
||||
@echo "--- [1. Filmcolor] ---"
|
||||
@echo " Input dng: $<"
|
||||
@echo " Sim Data: $(SIM_DATA_FILE)"
|
||||
@echo " Output: $@"
|
||||
$(FILMCOLOR_EXE) "$<" "$(SIM_DATA_FILE)" "$@"
|
||||
|
||||
# 2. Filmscan
|
||||
$(DIR_FILMSCAN)/%.tiff: $(DIR_FILMCOLOR)/%.tiff
|
||||
@echo "--- [2. Filmscan] ---"
|
||||
@echo " Input: $<"
|
||||
@echo " Output: $@"
|
||||
$(FILMSCAN_EXE) "$<" "$@"
|
||||
|
||||
# 3. Filmgrain RGB
|
||||
$(DIR_FILMGRAIN_RGB)/%.tiff: $(DIR_FILMSCAN)/%.tiff
|
||||
@echo "--- [3. Filmgrain RGB] ---"
|
||||
@echo " Input: $<"
|
||||
@echo " Output: $@"
|
||||
$(FILMGRAIN_EXE) "$<" "$@"
|
||||
|
||||
# 4. Filmgrain Mono
|
||||
$(DIR_FILMGRAIN_MONO)/%.tiff: $(DIR_FILMSCAN)/%.tiff
|
||||
@echo "--- [4. Filmgrain Mono] ---"
|
||||
@echo " Input: $<"
|
||||
@echo " Output: $@"
|
||||
$(FILMGRAIN_EXE) "$<" "$@" --mono
|
||||
|
||||
|
||||
# --- Main User Targets ---
|
||||
|
||||
# Process a single image specified by INPUT_DNG_PATH
|
||||
full_process: $(SPECIFIC_FILMGRAIN_RGB_OUT) $(SPECIFIC_FILMGRAIN_MONO_OUT)
|
||||
ifndef INPUT_DNG_PATH
|
||||
$(error INPUT_DNG_PATH must be set for 'make full_process'. Usage: make full_process INPUT_DNG_PATH=/path/to/image.DNG)
|
||||
endif
|
||||
@echo "----------------------------------------------------"
|
||||
@echo "SUCCESS: Full processing complete for $(BASENAME_SINGLE)"
|
||||
@echo "RGB Output: $(SPECIFIC_FILMGRAIN_RGB_OUT)"
|
||||
@echo "Mono Output: $(SPECIFIC_FILMGRAIN_MONO_OUT)"
|
||||
@echo "----------------------------------------------------"
|
||||
|
||||
# Process all DNG images in test_images/RAW/
|
||||
run_all_test_process: $(TARGETS_FOR_RUN_ALL)
|
||||
@echo "===================================================="
|
||||
@echo "SUCCESS: All test images in $(RAW_DIR) processed."
|
||||
@echo "Processed $(words $(ALL_BASENAMES)) images: $(ALL_BASENAMES)"
|
||||
@echo "===================================================="
|
||||
|
||||
all:
|
||||
@echo "Common targets: 'make full_process INPUT_DNG_PATH=...', 'make run_all_test_process'"
|
||||
|
||||
|
||||
# Help message
|
||||
help:
|
||||
@echo "Makefile for Image Processing Pipeline"
|
||||
@echo ""
|
||||
@echo "Usage: make <target> [INPUT_DNG_PATH=<path_to_dng_file>]"
|
||||
@echo ""
|
||||
@echo "Main Targets:"
|
||||
@echo " full_process - Run the entire pipeline for a single image."
|
||||
@echo " Requires: INPUT_DNG_PATH=./test_images/RAW/your_image.DNG"
|
||||
@echo " run_all_test_process - Run the entire pipeline for ALL .DNG/.dng images"
|
||||
@echo " found in $(RAW_DIR)/"
|
||||
@echo ""
|
||||
@echo "Other Targets:"
|
||||
@echo " create_dirs - Ensure all output directories exist."
|
||||
@echo " help - Show this help message."
|
||||
@echo ""
|
||||
@echo "Examples:"
|
||||
@echo " make full_process INPUT_DNG_PATH=./test_images/RAW/09.DNG"
|
||||
@echo " make run_all_test_process"
|
23
README.md
Normal file
@@ -0,0 +1,23 @@
# filmsim

This is an exploration into a few things:
- An LLM-based project (I do minimal coding)
- Film simulation
- Real life film capture

This project seeks to create a fast, "batteries included" film simulation package that walks the user through the film simulation process.

Currently we have the following pipeline components:
- `filmcolor` - Takes in a digital color positive (a picture from your digital camera) and outputs a simulated film negative based on the chosen film stock
- `filmscan` - Simulates the film scan and negative-reversal process by referencing the "[Negadoctor](https://github.com/darktable-org/darktable/blob/master/src/iop/negadoctor.c)" module from [Darktable](https://www.darktable.org/), while adding a few automatic features to stay "batteries included"
- `filmgrain` - Adds grain, in either RGB or monochrome, based on the film grain method of [Zhang et al. (2023)](https://dl.acm.org/doi/10.1145/3592127)

All scripts are designed to take TIFF/PNG/JPG input and produce TIFF/PNG/JPG output. TIFFs are written as uncompressed 16-bit.

`filmcolor` can additionally take in Sony ARW and various DNG camera RAW files.

All scripts are self-contained and portable.
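
For a single image, the three tools chain together in the order listed above. A minimal sketch, assuming the scripts are executable from the repository root and mirroring the recipes in the Makefile above (filenames are illustrative): `./filmcolor input.DNG sim_data/portra_400.json negative.tiff`, then `./filmscan negative.tiff scan.tiff`, then `./filmgrain scan.tiff final.tiff` (add `--mono` for monochrome grain). The `make full_process` and `make run_all_test_process` targets automate the same sequence.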

Details about each script can be found in their respective readmes.

This project also contains test input images and outputs at various stages of development.
BIN
docs/2017_Newson_film_grain.pdf
(Stored with Git LFS)
Normal file
Binary file not shown.
BIN
docs/A Model for Simulating the Photographic Development Process on Digital Images.pdf
(Stored with Git LFS)
Normal file
Binary file not shown.
BIN
docs/Simulation of film media.pdf
(Stored with Git LFS)
Normal file
Binary file not shown.
BIN
docs/e4050_portra_400.pdf
(Stored with Git LFS)
Normal file
Binary file not shown.
1093
docs/negadoctor.md
Normal file
File diff suppressed because it is too large
53
docs/negadoctor_cl.md
Normal file
@@ -0,0 +1,53 @@
/*
This file is part of darktable,
Copyright (C) 2020 darktable developers.

darktable is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

darktable is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with darktable. If not, see <http://www.gnu.org/licenses/>.
*/

#include "common.h"


kernel void
negadoctor (read_only image2d_t in, write_only image2d_t out, int width, int height,
const float4 Dmin, const float4 wb_high, const float4 offset,
const float exposure, const float black, const float gamma, const float soft_clip, const float soft_clip_comp)
{
const unsigned int x = get_global_id(0);
const unsigned int y = get_global_id(1);

if(x >= width || y >= height) return;

float4 i = read_imagef(in, sampleri, (int2)(x, y));
float4 o;

// Convert transmission to density using Dmin as a fulcrum
o = -native_log10(Dmin / fmax(i, (float4)2.3283064365386963e-10f)); // threshold to -32 EV

// Correct density in log space
o = wb_high * o + offset;

// Print density on paper : ((1 - 10^corrected_de + black) * exposure)^gamma rewritten for FMA
o = -((float4)exposure * native_exp10(o) + (float4)black);
o = dtcl_pow(fmax(o, (float4)0.0f), gamma); // note : this is always > 0

// Compress highlights and clip negatives. from https://lists.gnu.org/archive/html/openexr-devel/2005-03/msg00009.html
o = (o > (float4)soft_clip) ? soft_clip + ((float4)1.0f - native_exp(-(o - (float4)soft_clip) / (float4)soft_clip_comp)) * (float4)soft_clip_comp
: o;

// Copy alpha
o.w = i.w;

write_imagef(out, (int2)(x, y), o);
}
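
For readers who prefer the project's Python tooling to OpenCL, the per-pixel math of the kernel above can be re-expressed with NumPy roughly as follows. This is a loose reference sketch only (it is not part of the darktable source or of this commit); parameter names mirror the kernel arguments, and the inputs are assumed to be float arrays broadcastable to the image shape:

import numpy as np

THRESHOLD = 2.3283064365386963e-10  # same -32 EV floor used by the kernel

def negadoctor_reference(img, Dmin, wb_high, offset, exposure, black, gamma, soft_clip, soft_clip_comp):
    # Convert transmission to density using Dmin as a fulcrum
    o = -np.log10(Dmin / np.maximum(img, THRESHOLD))
    # Correct density in log space
    o = wb_high * o + offset
    # Print density on paper, then apply the paper gamma
    o = -(exposure * 10.0**o + black)
    o = np.maximum(o, 0.0) ** gamma
    # Compress highlights and clip negatives (soft clip)
    compressed = soft_clip + (1.0 - np.exp(-(o - soft_clip) / soft_clip_comp)) * soft_clip_comp
    return np.where(o > soft_clip, compressed, o)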
BIN
docs/sigg97.pdf
(Stored with Git LFS)
Normal file
Binary file not shown.
725
filmcolor
Executable file
@@ -0,0 +1,725 @@
|
||||
#!/usr/bin/env -S uv run --script
|
||||
# /// script
|
||||
# dependencies = [
|
||||
# "numpy",
|
||||
# "scipy",
|
||||
# "Pillow",
|
||||
# "imageio",
|
||||
# "rawpy",
|
||||
# ]
|
||||
# ///
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Single-file Python script for Film Stock Color Emulation based on a Datasheet JSON.
|
||||
|
||||
Focuses on color transformation, applying effects derived from datasheet parameters.
|
||||
Assumes input image is in linear RGB format. Excludes film grain simulation.
|
||||
|
||||
Dependencies: numpy, scipy, Pillow, imageio, rawpy
|
||||
Installation: pip install numpy scipy Pillow imageio rawpy (the uv script header above also declares these)
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import csv
|
||||
import numpy as np
|
||||
import imageio.v3 as iio
|
||||
from scipy.interpolate import interp1d
|
||||
from scipy.ndimage import gaussian_filter
|
||||
import rawpy
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
|
||||
# --- Configuration ---
|
||||
# Small epsilon to prevent log(0) or division by zero errors
|
||||
EPSILON = 1e-10
|
||||
|
||||
from typing import Optional, List
|
||||
|
||||
|
||||
class Info:
|
||||
name: str
|
||||
description: str
|
||||
format_mm: int
|
||||
version: str
|
||||
|
||||
def __init__(
|
||||
self, name: str, description: str, format_mm: int, version: str
|
||||
) -> None:
|
||||
self.name = name
|
||||
self.description = description
|
||||
self.format_mm = format_mm
|
||||
self.version = version
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return (
|
||||
f"Info(name={self.name}, description={self.description}, "
|
||||
f"format_mm={self.format_mm}, version={self.version})"
|
||||
)
|
||||
|
||||
|
||||
class Balance:
|
||||
r_shift: float
|
||||
g_shift: float
|
||||
b_shift: float
|
||||
|
||||
def __init__(self, r_shift: float, g_shift: float, b_shift: float) -> None:
|
||||
self.r_shift = r_shift
|
||||
self.g_shift = g_shift
|
||||
self.b_shift = b_shift
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return (
|
||||
f"Balance(r_shift={self.r_shift:.3f}, g_shift={self.g_shift:.3f}, b_shift={self.b_shift:.3f})"
|
||||
)
|
||||
|
||||
|
||||
class Gamma:
|
||||
r_factor: float
|
||||
g_factor: float
|
||||
b_factor: float
|
||||
|
||||
def __init__(self, r_factor: float, g_factor: float, b_factor: float) -> None:
|
||||
self.r_factor = r_factor
|
||||
self.g_factor = g_factor
|
||||
self.b_factor = b_factor
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return (
|
||||
f"Gamma(r_factor={self.r_factor:.3f}, g_factor={self.g_factor:.3f}, b_factor={self.b_factor:.3f})"
|
||||
)
|
||||
|
||||
|
||||
class Processing:
|
||||
gamma: Gamma
|
||||
balance: Balance
|
||||
|
||||
def __init__(self, gamma: Gamma, balance: Balance) -> None:
|
||||
self.gamma = gamma
|
||||
self.balance = balance
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return (
|
||||
f"Processing(gamma=({self.gamma.r_factor:.3f}, {self.gamma.g_factor:.3f}, {self.gamma.b_factor:.3f}), "
|
||||
f"balance=({self.balance.r_shift:.3f}, {self.balance.g_shift:.3f}, {self.balance.b_shift:.3f}))"
|
||||
)
|
||||
|
||||
|
||||
class Couplers:
|
||||
amount: float
|
||||
diffusion_um: float
|
||||
|
||||
def __init__(self, amount: float, diffusion_um: float) -> None:
|
||||
self.amount = amount
|
||||
self.diffusion_um = diffusion_um
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"Couplers(amount={self.amount:.3f}, diffusion_um={self.diffusion_um:.1f})"
|
||||
|
||||
|
||||
class HDCurvePoint:
|
||||
d: Optional[float]
|
||||
r: float
|
||||
g: float
|
||||
b: float
|
||||
|
||||
def __init__(self, d: Optional[float], r: float, g: float, b: float) -> None:
|
||||
self.d = d
|
||||
self.r = r
|
||||
self.g = g
|
||||
self.b = b
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"HDCurvePoint(d={self.d}, r={self.r:.3f}, g={self.g:.3f}, b={self.b:.3f})"
|
||||
|
||||
|
||||
class SpectralSensitivityCurvePoint:
|
||||
wavelength: float
|
||||
y: float
|
||||
m: float
|
||||
c: float
|
||||
|
||||
def __init__(self, wavelength: float, y: float, m: float, c: float) -> None:
|
||||
self.wavelength = wavelength
|
||||
self.y = y
|
||||
self.m = m
|
||||
self.c = c
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"SpectralSensitivityCurvePoint(wavelength={self.wavelength:.1f}, y={self.y:.3f}, m={self.m:.3f}, c={self.c:.3f})"
|
||||
|
||||
|
||||
class RGBValue:
|
||||
r: float
|
||||
g: float
|
||||
b: float
|
||||
|
||||
def __init__(self, r: float, g: float, b: float) -> None:
|
||||
self.r = r
|
||||
self.g = g
|
||||
self.b = b
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"RGBValue(r={self.r:.3f}, g={self.g:.3f}, b={self.b:.3f})"
|
||||
|
||||
|
||||
class Curves:
|
||||
hd: List[HDCurvePoint]
|
||||
spectral_sensitivity: List[SpectralSensitivityCurvePoint]
|
||||
|
||||
def __init__(self, hd: List[HDCurvePoint], spectral_sensitivity: List[SpectralSensitivityCurvePoint]) -> None:
|
||||
self.hd = hd
|
||||
self.spectral_sensitivity = spectral_sensitivity
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"Curves(hd={',\n'.join(repr(point) for point in self.hd)}, spectral_sensitivity={',\n'.join(repr(point) for point in self.spectral_sensitivity)})"
|
||||
|
||||
|
||||
class Halation:
|
||||
strength: RGBValue
|
||||
size_um: RGBValue
|
||||
|
||||
def __init__(self, strength: RGBValue, size_um: RGBValue) -> None:
|
||||
self.strength = strength
|
||||
self.size_um = size_um
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"Halation(strength={self.strength}, size_um={self.size_um})"
|
||||
|
||||
|
||||
class Interlayer:
|
||||
diffusion_um: float
|
||||
|
||||
def __init__(self, diffusion_um: float) -> None:
|
||||
self.diffusion_um = diffusion_um
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"Interlayer(diffusion_um={self.diffusion_um:.1f})"
|
||||
|
||||
|
||||
class Calibration:
|
||||
iso: int
|
||||
middle_gray_logE: float
|
||||
|
||||
def __init__(self, iso: int, middle_gray_logE: float) -> None:
|
||||
self.iso = iso
|
||||
self.middle_gray_logE = middle_gray_logE
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return (
|
||||
f"Calibration(iso={self.iso}\nmiddle_gray_logE={self.middle_gray_logE:.3f})"
|
||||
)
|
||||
|
||||
|
||||
class Properties:
|
||||
halation: Halation
|
||||
couplers: Couplers
|
||||
interlayer: Interlayer
|
||||
curves: Curves
|
||||
calibration: Calibration
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
halation: Halation,
|
||||
couplers: Couplers,
|
||||
interlayer: Interlayer,
|
||||
curves: Curves,
|
||||
calibration: Calibration,
|
||||
) -> None:
|
||||
self.halation = halation
|
||||
self.couplers = couplers
|
||||
self.interlayer = interlayer
|
||||
self.curves = curves
|
||||
self.calibration = calibration
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return (
|
||||
f"Properties(halation={self.halation}\ncouplers={self.couplers}\n"
|
||||
f"interlayer={self.interlayer}\ncurves={self.curves}\n"
|
||||
f"calibration={self.calibration})"
|
||||
)
|
||||
|
||||
|
||||
class FilmDatasheet:
|
||||
info: Info
|
||||
processing: Processing
|
||||
properties: Properties
|
||||
|
||||
def __init__(
|
||||
self, info: Info, processing: Processing, properties: Properties
|
||||
) -> None:
|
||||
self.info = info
|
||||
self.processing = processing
|
||||
self.properties = properties
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return (
|
||||
f"FilmDatasheet(info={self.info}\nprocessing={self.processing}\n"
|
||||
f"properties={self.properties})"
|
||||
)
|
||||
|
||||
import pprint
|
||||
|
||||
def parse_datasheet_json(json_filepath) -> FilmDatasheet | None:
|
||||
# Parse JSON into FilmDatasheet object
|
||||
"""Parses the film datasheet JSON file.
|
||||
Args:
|
||||
json_filepath (str): Path to the datasheet JSON file.
|
||||
Returns:
|
||||
FilmDatasheet: Parsed datasheet object.
|
||||
"""
|
||||
if not os.path.exists(json_filepath):
|
||||
print(f"Error: Datasheet file not found at {json_filepath}", file=sys.stderr)
|
||||
return None
|
||||
try:
|
||||
with open(json_filepath, "r") as jsonfile:
|
||||
data = json.load(jsonfile)
|
||||
# Parse the JSON data into the FilmDatasheet structure
|
||||
info = Info(
|
||||
name=data["info"]["name"],
|
||||
description=data["info"]["description"],
|
||||
format_mm=data["info"]["format_mm"],
|
||||
version=data["info"]["version"],
|
||||
)
|
||||
gamma = Gamma(
|
||||
r_factor=data["processing"]["gamma"]["r_factor"],
|
||||
g_factor=data["processing"]["gamma"]["g_factor"],
|
||||
b_factor=data["processing"]["gamma"]["b_factor"],
|
||||
)
|
||||
balance = Balance(
|
||||
r_shift=data["processing"]["balance"]["r_shift"],
|
||||
g_shift=data["processing"]["balance"]["g_shift"],
|
||||
b_shift=data["processing"]["balance"]["b_shift"],
|
||||
)
|
||||
processing = Processing(gamma=gamma, balance=balance)
|
||||
halation = Halation(
|
||||
strength=RGBValue(
|
||||
r=data["properties"]["halation"]["strength"]["r"],
|
||||
g=data["properties"]["halation"]["strength"]["g"],
|
||||
b=data["properties"]["halation"]["strength"]["b"],
|
||||
),
|
||||
size_um=RGBValue(
|
||||
r=data["properties"]["halation"]["size_um"]["r"],
|
||||
g=data["properties"]["halation"]["size_um"]["g"],
|
||||
b=data["properties"]["halation"]["size_um"]["b"],
|
||||
),
|
||||
)
|
||||
couplers = Couplers(
|
||||
amount=data["properties"]["couplers"]["amount"],
|
||||
diffusion_um=data["properties"]["couplers"]["diffusion_um"],
|
||||
)
|
||||
interlayer = Interlayer(
|
||||
diffusion_um=data["properties"]["interlayer"]["diffusion_um"]
|
||||
)
|
||||
calibration = Calibration(
|
||||
iso=data["properties"]["calibration"]["iso"],
|
||||
middle_gray_logE=data["properties"]["calibration"]["middle_gray_logh"],
|
||||
)
|
||||
curves = Curves(
|
||||
hd=[
|
||||
HDCurvePoint(d=point["d"], r=point["r"], g=point["g"], b=point["b"])
|
||||
for point in data["properties"]["curves"]["hd"]
|
||||
],
|
||||
spectral_sensitivity=[
|
||||
SpectralSensitivityCurvePoint(
|
||||
wavelength=point["wavelength"],
|
||||
y=point["y"],
|
||||
m=point["m"],
|
||||
c=point["c"],
|
||||
)
|
||||
for point in data["properties"]["curves"]["spectral_sensitivity"]
|
||||
],
|
||||
)
|
||||
print(f"Parsed {len(curves.hd)} H&D curve points.")
|
||||
properties = Properties(
|
||||
calibration=calibration,
|
||||
halation=halation,
|
||||
couplers=couplers,
|
||||
interlayer=interlayer,
|
||||
curves=curves,
|
||||
)
|
||||
return FilmDatasheet(
|
||||
info=info, processing=processing, properties=properties
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"Error parsing datasheet JSON '{json_filepath}': {e}", file=sys.stderr)
|
||||
return None
|
||||
|
||||
|
||||
def um_to_pixels(sigma_um, image_width_px, film_format_mm):
|
||||
"""Converts sigma from micrometers to pixels."""
|
||||
if film_format_mm <= 0 or image_width_px <= 0:
|
||||
return 0
|
||||
microns_per_pixel = (film_format_mm * 1000.0) / image_width_px
|
||||
sigma_pixels = sigma_um / microns_per_pixel
|
||||
return sigma_pixels
|
||||
|
||||
|
||||
def apply_hd_curves(
|
||||
log_exposure_rgb,
|
||||
processing: Processing,
|
||||
hd_curve: List[HDCurvePoint],
|
||||
middle_gray_logE: float,
|
||||
) -> np.ndarray:
|
||||
"""Applies H&D curves to log exposure values."""
|
||||
density_rgb = np.zeros_like(log_exposure_rgb)
|
||||
gamma_factors = [
|
||||
processing.gamma.r_factor,
|
||||
processing.gamma.g_factor,
|
||||
processing.gamma.b_factor,
|
||||
]
|
||||
balance_shifts = [
|
||||
processing.balance.r_shift,
|
||||
processing.balance.g_shift,
|
||||
processing.balance.b_shift,
|
||||
]
|
||||
|
||||
min_logE = hd_curve[0].d
|
||||
max_logE = hd_curve[-1].d
|
||||
min_densities = [hd_curve[0].r, hd_curve[0].g, hd_curve[0].b]  # per-channel density floor for extrapolation
|
||||
max_densities = [hd_curve[-1].r, hd_curve[-1].g, hd_curve[-1].b]  # per-channel density ceiling for extrapolation
|
||||
|
||||
for i, channel in enumerate(["R", "G", "B"]):
|
||||
# Apply gamma factor (affects contrast by scaling log exposure input)
|
||||
# Handle potential division by zero if gamma factor is 0
|
||||
gamma_factor = gamma_factors[i]
|
||||
if abs(gamma_factor) < EPSILON:
|
||||
print(
|
||||
f"Warning: Gamma factor for channel {channel} is near zero. Clamping to {EPSILON}.",
|
||||
file=sys.stderr,
|
||||
)
|
||||
gamma_factor = EPSILON if gamma_factor >= 0 else -EPSILON
|
||||
|
||||
# Adjust log exposure relative to middle gray before applying gamma
|
||||
log_exposure_adjusted = (
|
||||
middle_gray_logE
|
||||
+ (log_exposure_rgb[..., i] - middle_gray_logE) / gamma_factor
|
||||
)
|
||||
|
||||
# Clamp adjusted exposure to the range defined in the H&D data before interpolation
|
||||
log_exposure_clamped = np.clip(log_exposure_adjusted, min_logE, max_logE)
|
||||
|
||||
# Create interpolation function for the current channel
|
||||
if channel == "R":
|
||||
interp_func = interp1d(
|
||||
[d.d for d in hd_curve],
|
||||
[d.r for d in hd_curve],
|
||||
kind="linear", # Linear interpolation is common, could be 'cubic'
|
||||
bounds_error=False, # Allows extrapolation, but we clamp manually below
|
||||
fill_value=(min_densities[i], max_densities[i]), # type: ignore
|
||||
)
|
||||
elif channel == "G":
|
||||
interp_func = interp1d(
|
||||
[d.d for d in hd_curve],
|
||||
[d.g for d in hd_curve],
|
||||
kind="linear", # Linear interpolation is common, could be 'cubic'
|
||||
bounds_error=False, # Allows extrapolation, but we clamp manually below
|
||||
fill_value=(min_densities[i], max_densities[i]), # type: ignore
|
||||
)
|
||||
else:
|
||||
interp_func = interp1d(
|
||||
[d.d for d in hd_curve],
|
||||
[d.b for d in hd_curve],
|
||||
kind="linear", # Linear interpolation is common, could be 'cubic'
|
||||
bounds_error=False, # Allows extrapolation, but we clamp manually below
|
||||
fill_value=(min_densities[i], max_densities[i]), # type: ignore
|
||||
)
|
||||
|
||||
# Apply interpolation
|
||||
density = interp_func(log_exposure_clamped)
|
||||
|
||||
# Apply density balance shift (additive density offset)
|
||||
density += balance_shifts[i]
|
||||
|
||||
density_rgb[..., i] = np.maximum(density, 0) # Ensure density is non-negative
|
||||
|
||||
return density_rgb
|
||||
|
||||
|
||||
def apply_saturation_rgb(image_linear, saturation_factor):
|
||||
"""Adjusts saturation directly in RGB space."""
|
||||
if saturation_factor == 1.0:
|
||||
return image_linear
|
||||
|
||||
# Luminance weights for sRGB primaries (Rec.709)
|
||||
luminance = (
|
||||
0.2126 * image_linear[..., 0]
|
||||
+ 0.7152 * image_linear[..., 1]
|
||||
+ 0.0722 * image_linear[..., 2]
|
||||
)
|
||||
|
||||
# Expand luminance to 3 channels for broadcasting
|
||||
luminance_rgb = np.expand_dims(luminance, axis=-1)
|
||||
|
||||
# Apply saturation: Lerp between luminance (grayscale) and original color
|
||||
saturated_image = luminance_rgb + saturation_factor * (image_linear - luminance_rgb)
|
||||
|
||||
# Clip results to valid range (important after saturation boost)
|
||||
return np.clip(saturated_image, 0.0, 1.0)
|
||||
|
||||
|
||||
def apply_spatial_effects(
|
||||
image,
|
||||
film_format_mm,
|
||||
couplerData: Couplers,
|
||||
interlayerData: Interlayer,
|
||||
halationData: Halation,
|
||||
image_width_px,
|
||||
):
|
||||
"""Applies diffusion blur and halation."""
|
||||
# Combine diffusion effects (assuming they add quadratically in terms of sigma)
|
||||
total_diffusion_um = np.sqrt(
|
||||
couplerData.diffusion_um**2 + interlayerData.diffusion_um**2
|
||||
)
|
||||
|
||||
if total_diffusion_um > EPSILON:
|
||||
sigma_pixels_diffusion = um_to_pixels(
|
||||
total_diffusion_um, image_width_px, film_format_mm
|
||||
)
|
||||
if sigma_pixels_diffusion > EPSILON:
|
||||
print(
|
||||
f"Applying diffusion blur: sigma={sigma_pixels_diffusion:.2f} pixels ({total_diffusion_um:.1f} um)"
|
||||
)
|
||||
# Apply blur to the linear image data
|
||||
image = gaussian_filter(
|
||||
image,
|
||||
sigma=[sigma_pixels_diffusion, sigma_pixels_diffusion, 0],
|
||||
mode="nearest",
|
||||
) # Blur R, G, B independently
|
||||
image = np.clip(image, 0.0, 1.0) # Keep values in range
|
||||
|
||||
# --- 2. Apply Halation ---
|
||||
# This simulates light scattering back through the emulsion
|
||||
halation_applied = False
|
||||
blurred_image_halation = np.copy(
|
||||
image
|
||||
) # Start with potentially diffusion-blurred image
|
||||
|
||||
strengths = [
|
||||
halationData.strength.r,
|
||||
halationData.strength.g,
|
||||
halationData.strength.b,
|
||||
]
|
||||
sizes_um = [
|
||||
halationData.size_um.r,
|
||||
halationData.size_um.g,
|
||||
halationData.size_um.b,
|
||||
]
|
||||
|
||||
for i in range(3):
|
||||
strength = strengths[i]
|
||||
size_um = sizes_um[i]
|
||||
if strength > EPSILON and size_um > EPSILON:
|
||||
sigma_pixels_halation = um_to_pixels(
|
||||
size_um, image_width_px, film_format_mm
|
||||
)
|
||||
if sigma_pixels_halation > EPSILON:
|
||||
halation_applied = True
|
||||
print(
|
||||
f"Applying halation blur (Channel {i}): sigma={sigma_pixels_halation:.2f} pixels ({size_um:.1f} um), strength={strength:.3f}"
|
||||
)
|
||||
# Blur only the current channel for halation effect
|
||||
channel_blurred = gaussian_filter(
|
||||
image[..., i], sigma=sigma_pixels_halation, mode="nearest"
|
||||
)
|
||||
# Add the blurred channel back, weighted by strength
|
||||
blurred_image_halation[..., i] = (
|
||||
image[..., i] * (1.0 - strength) + channel_blurred * strength
|
||||
)
|
||||
|
||||
if halation_applied:
|
||||
# Clip final result after halation
|
||||
image = np.clip(blurred_image_halation, 0.0, 1.0)
|
||||
|
||||
return image
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Simulate film stock color characteristics using a datasheet JSON."
|
||||
)
|
||||
parser.add_argument(
|
||||
"input_image",
|
||||
help="Path to the input RGB image (e.g., PNG, TIFF). Assumed linear RGB.",
|
||||
)
|
||||
parser.add_argument("datasheet_json", help="Path to the film datasheet JSON file.")
|
||||
parser.add_argument("output_image", help="Path to save the output emulated image.")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# --- Load Datasheet ---
|
||||
print(f"Loading datasheet: {args.datasheet_json}")
|
||||
datasheet: FilmDatasheet | None = parse_datasheet_json(args.datasheet_json)
|
||||
if datasheet is None:
|
||||
sys.exit(1)
|
||||
print(
|
||||
f"Simulating: {datasheet.info.name} ({datasheet.info.format_mm}mm) (v{datasheet.info.version})\n\t{datasheet.info.description}"
|
||||
)
|
||||
|
||||
import pprint
|
||||
pprint.pp(datasheet)
|
||||
|
||||
# --- Load Input Image ---
|
||||
print(f"Loading input image: {args.input_image}")
|
||||
try:
|
||||
# For DNG files, force reading the raw image data, not the embedded thumbnail
|
||||
if (
|
||||
args.input_image.lower().endswith(".dng")
|
||||
or args.input_image.lower().endswith(".raw")
|
||||
or args.input_image.lower().endswith(".arw")
|
||||
):
|
||||
print("Detected Camera RAW file, reading raw image data...")
|
||||
with rawpy.imread(args.input_image) as raw:
|
||||
image_raw = raw.postprocess(
|
||||
demosaic_algorithm=rawpy.DemosaicAlgorithm.AHD, # type: ignore
|
||||
output_bps=16, # Use 16-bit output for better precision
|
||||
use_camera_wb=True, # Use camera white balance
|
||||
no_auto_bright=True, # Disable auto brightness adjustment
|
||||
output_color=rawpy.ColorSpace.sRGB, # type: ignore
|
||||
)
|
||||
# If the image has more than 3 channels, try to select the first 3 (RGB)
|
||||
if image_raw.ndim == 3 and image_raw.shape[-1] > 3:
|
||||
image_raw = image_raw[..., :3]
|
||||
else:
|
||||
image_raw = iio.imread(args.input_image)
|
||||
except FileNotFoundError:
|
||||
print(
|
||||
f"Error: Input image file not found at {args.input_image}", file=sys.stderr
|
||||
)
|
||||
sys.exit(1)
|
||||
except Exception as e:
|
||||
print(f"Error reading input image: {e}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
# Check if image is likely sRGB (8-bit or 16-bit integer types are usually sRGB)
|
||||
is_srgb = image_raw.dtype in (np.uint8, np.uint16)
|
||||
if is_srgb:
|
||||
print("Input image appears to be sRGB. Linearizing...")
|
||||
# Convert to float in [0,1]
|
||||
if image_raw.dtype == np.uint8:
|
||||
image_float = image_raw.astype(np.float64) / 255.0
|
||||
elif image_raw.dtype == np.uint16:
|
||||
image_float = image_raw.astype(np.float64) / 65535.0
|
||||
else:
|
||||
image_float = image_raw.astype(np.float64)
|
||||
|
||||
# sRGB to linear conversion
|
||||
def srgb_to_linear(c):
|
||||
c = np.clip(c, 0.0, 1.0)
|
||||
return np.where(c <= 0.04045, c / 12.92, ((c + 0.055) / 1.055) ** 2.4)
|
||||
|
||||
image_raw = srgb_to_linear(image_float)
|
||||
else:
|
||||
print("Input image is assumed to be linear RGB.")
|
||||
|
||||
# --- Prepare Image Data ---
|
||||
# Convert to float64 for precision, handle different input types
|
||||
if image_raw.dtype == np.uint8:
|
||||
image_linear = image_raw.astype(np.float64) / 255.0
|
||||
elif image_raw.dtype == np.uint16:
|
||||
image_linear = image_raw.astype(np.float64) / 65535.0
|
||||
elif image_raw.dtype == np.float32:
|
||||
image_linear = image_raw.astype(np.float64)
|
||||
elif image_raw.dtype == np.float64:
|
||||
image_linear = image_raw
|
||||
else:
|
||||
print(f"Error: Unsupported image data type: {image_raw.dtype}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
# Discard alpha channel if present
|
||||
if image_linear.shape[-1] == 4:
|
||||
print("Discarding alpha channel.")
|
||||
image_linear = image_linear[..., :3]
|
||||
elif image_linear.shape[-1] != 3:
|
||||
print(
|
||||
f"Error: Input image must be RGB (shape {image_linear.shape} not supported).",
|
||||
file=sys.stderr,
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
# Ensure input is non-negative
|
||||
image_linear = np.maximum(image_linear, 0.0)
|
||||
print(f"Input image dimensions: {image_linear.shape}")
|
||||
image_width_px = image_linear.shape[1]
|
||||
|
||||
# --- Pipeline Steps ---
|
||||
|
||||
# 1. Convert Linear RGB to Log Exposure (LogE)
|
||||
# Map linear 0.18 to the specified middle_gray_logE from the datasheet
|
||||
print("Converting linear RGB to Log Exposure...")
|
||||
middle_gray_logE = float(datasheet.properties.calibration.middle_gray_logE)
|
||||
# Add epsilon inside log10 to handle pure black pixels
|
||||
log_exposure_rgb = middle_gray_logE + np.log10(image_linear / 0.18 + EPSILON)
|
||||
# Note: Values below 0.18 * 10**(hd_data['LogE'][0] - middle_gray_logE)
|
||||
# or above 0.18 * 10**(hd_data['LogE'][-1] - middle_gray_logE)
|
||||
# will map outside the H&D curve's defined LogE range and rely on clamping/extrapolation.
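# Worked example of the mapping above: a pixel at linear 0.18 lands exactly on middle_gray_logE
# (log10(1) = 0), and a pixel one stop brighter (linear 0.36) lands at middle_gray_logE + log10(2),
# i.e. about +0.30 log exposure.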
|
||||
|
||||
# 2. Apply H&D Curves (Tonal Mapping + Balance Shifts + Gamma/Contrast)
|
||||
print("Applying H&D curves...")
|
||||
density_rgb = apply_hd_curves(
|
||||
log_exposure_rgb,
|
||||
datasheet.processing,
|
||||
datasheet.properties.curves.hd,
|
||||
middle_gray_logE,
|
||||
)
|
||||
|
||||
# 3. Convert Density back to Linear Transmittance
|
||||
# Higher density means lower transmittance
|
||||
print("Converting density to linear transmittance...")
|
||||
# Add density epsilon? Usually density floor (Dmin) handles this.
|
||||
linear_transmittance = 10.0 ** (-density_rgb)
|
||||
# Normalize transmittance? Optional, assumes Dmin corresponds roughly to max transmittance 1.0
|
||||
# Could normalize relative to Dmin from the curves if needed.
|
||||
# linear_transmittance = linear_transmittance / (10.0**(-np.array([hd_data['Density_R'][0], hd_data['Density_G'][0], hd_data['Density_B'][0]])))
|
||||
linear_transmittance = np.clip(linear_transmittance, 0.0, 1.0)
|
||||
|
||||
# 4. Apply Spatial Effects (Diffusion Blur, Halation)
|
||||
print("Applying spatial effects (diffusion, halation)...")
|
||||
# Apply these effects in the linear domain
|
||||
linear_post_spatial = apply_spatial_effects(
|
||||
linear_transmittance,
|
||||
datasheet.info.format_mm,
|
||||
datasheet.properties.couplers,
|
||||
datasheet.properties.interlayer,
|
||||
datasheet.properties.halation,
|
||||
image_width_px,
|
||||
)
|
||||
|
||||
# 5. Apply Saturation Adjustment (Approximating Coupler Effects)
|
||||
print("Applying saturation adjustment...")
|
||||
coupler_amount = datasheet.properties.couplers.amount
|
||||
# Assuming coupler_amount directly scales saturation factor.
|
||||
# Values > 1 increase saturation, < 1 decrease.
|
||||
linear_post_saturation = apply_saturation_rgb(linear_post_spatial, coupler_amount)
|
||||
|
||||
# --- Final Output Conversion ---
|
||||
print("Converting to output format...")
|
||||
# Clip final result and convert to uint8
|
||||
if args.output_image.lower().endswith((".tiff", ".tif")):
|
||||
output_image_uint8 = (
|
||||
np.clip(linear_post_saturation, 0.0, 1.0) * 65535.0
|
||||
).astype(np.uint16)
|
||||
else:
|
||||
output_image_uint8 = (np.clip(linear_post_saturation, 0.0, 1.0) * 255.0).astype(
|
||||
np.uint8
|
||||
)
|
||||
|
||||
# --- Save Output Image ---
|
||||
print(f"Saving output image: {args.output_image}")
|
||||
try:
|
||||
if args.output_image.lower().endswith((".tiff", ".tif")):
|
||||
# Use imageio for standard formats
|
||||
iio.imwrite(args.output_image, output_image_uint8)
|
||||
elif args.output_image.lower().endswith(".png"):
|
||||
iio.imwrite(args.output_image, output_image_uint8, format="PNG")
|
||||
else:
|
||||
iio.imwrite(args.output_image, output_image_uint8, quality=95)
|
||||
print("Done.")
|
||||
except Exception as e:
|
||||
print(f"Error writing output image: {e}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
432
filmgrain
Executable file
@@ -0,0 +1,432 @@
|
||||
#!/usr/bin/env -S uv run --script
|
||||
# /// script
|
||||
# dependencies = [
|
||||
# "numpy",
|
||||
# "scipy",
|
||||
# "Pillow",
|
||||
# "imageio",
|
||||
# "warp-lang",
|
||||
# "rawpy",
|
||||
# ]
|
||||
# ///
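
# Example usage (flags per the argparse setup at the bottom of this file; values are illustrative):
#   ./filmgrain scan.tiff grained.tiff --mu_r 0.1 --sigma 0.8 [--mono]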
|
||||
|
||||
|
||||
import warp as wp
|
||||
import numpy as np
|
||||
import math
|
||||
import argparse
|
||||
import imageio.v3 as iio
|
||||
from scipy.integrate import quad
|
||||
from scipy.signal.windows import gaussian # For creating Gaussian kernel
|
||||
import rawpy
|
||||
|
||||
wp.init()
|
||||
|
||||
# --- Helper functions based on the paper ---
|
||||
|
||||
def save_debug_image(wp_array, filename):
|
||||
"""Saves a Warp 2D array as a grayscale image, scaling values."""
|
||||
try:
|
||||
numpy_array = wp_array.numpy()
|
||||
min_val, max_val = np.min(numpy_array), np.max(numpy_array)
|
||||
if max_val > min_val:
|
||||
norm_array = (numpy_array - min_val) / (max_val - min_val)
|
||||
else:
|
||||
norm_array = np.full_like(numpy_array, 0.5)
|
||||
|
||||
img_uint8 = (norm_array * 255.0).clip(0, 255).astype(np.uint8)
|
||||
iio.imwrite(filename, img_uint8)
|
||||
print(f"Debug image saved to {filename}")
|
||||
except Exception as e:
|
||||
print(f"Error saving debug image {filename}: {e}")
|
||||
|
||||
|
||||
@wp.func
|
||||
def w_func(x: float):
|
||||
if x >= 2.0:
|
||||
return 0.0
|
||||
elif x < 0.0:
|
||||
return 1.0
|
||||
else:
|
||||
|
||||
|
||||
# Corrected w(x) based on overlap area of two unit disks divided by pi
|
||||
# overlap_area = 2 * acos(x/2) - x * sqrt(1 - (x/2)^2)
|
||||
# w(x) = overlap_area / pi
|
||||
# Ensure acos argument is clamped
|
||||
acos_arg = x / 2.0
|
||||
if acos_arg > 1.0: acos_arg = 1.0
|
||||
if acos_arg < -1.0: acos_arg = -1.0
|
||||
|
||||
sqrt_term_arg = 1.0 - acos_arg * acos_arg
|
||||
if sqrt_term_arg < 0.0: sqrt_term_arg = 0.0
|
||||
|
||||
overlap_area_over_pi = (
|
||||
2.0 * wp.acos(acos_arg) - x * wp.sqrt(sqrt_term_arg)
|
||||
) / math.pi
|
||||
return overlap_area_over_pi
|
||||
|
||||
|
||||
@wp.func
|
||||
def CB_const_radius_unit(u_pixel: float, x: float):
|
||||
safe_u = wp.min(u_pixel, 0.99999)
|
||||
if safe_u < 0.0:
|
||||
safe_u = 0.0
|
||||
|
||||
one_minus_u = 1.0 - safe_u
|
||||
if one_minus_u <= 1e-9: # Avoid pow(small_negative_base) or pow(0, neg_exponent from wx)
|
||||
return 0.0
|
||||
|
||||
wx = w_func(x)
|
||||
# Ensure 2.0 - wx does not lead to issues if wx is large, though wx should be <= 1
|
||||
exponent = 2.0 - wx # type: ignore
|
||||
|
||||
term1 = wp.pow(one_minus_u, exponent)
|
||||
term2 = one_minus_u * one_minus_u
|
||||
return term1 - term2
|
||||
|
||||
|
||||
def integrand_variance(x, u_pixel):
|
||||
if x < 0: return 0.0
|
||||
if x >= 2.0: return 0.0
|
||||
|
||||
safe_u = np.clip(u_pixel, 0.0, 0.99999)
|
||||
one_minus_u = 1.0 - safe_u
|
||||
if one_minus_u <= 1e-9: return 0.0
|
||||
|
||||
acos_arg = x / 2.0
|
||||
if acos_arg > 1.0: acos_arg = 1.0
|
||||
if acos_arg < -1.0: acos_arg = -1.0
|
||||
|
||||
sqrt_term_arg = 1.0 - acos_arg * acos_arg
|
||||
if sqrt_term_arg < 0.0: sqrt_term_arg = 0.0
|
||||
|
||||
wx = (2.0 * np.arccos(acos_arg) - x * np.sqrt(sqrt_term_arg)) / np.pi
|
||||
|
||||
cb = np.power(one_minus_u, 2.0 - wx) - np.power(one_minus_u, 2.0)
|
||||
return cb * x
|
||||
|
||||
|
||||
def precompute_variance_lut(num_u_samples=256):
|
||||
"""
|
||||
Precomputes the integral I(u) = ∫[0 to 2] CB(u, x, 1) * x dx for different u values.
|
||||
Creates a LUT with num_u_samples + 1 entries (for u=0 to u=1 inclusive).
|
||||
"""
|
||||
print(f"Precomputing variance LUT with {num_u_samples+1} entries...")
|
||||
# Samples u from 0 to 1 inclusive for the LUT
|
||||
u_values_for_lut = np.linspace(0.0, 1.0, num_u_samples + 1, endpoint=True)
|
||||
lut = np.zeros(num_u_samples + 1, dtype=np.float32)
|
||||
|
||||
for i, u in enumerate(u_values_for_lut):
|
||||
result, error = quad(
|
||||
integrand_variance, 0, 2, args=(u,), epsabs=1e-6, limit=100
|
||||
)
|
||||
if result < 0: result = 0.0
|
||||
lut[i] = result
|
||||
if i % ((num_u_samples + 1) // 10) == 0 :
|
||||
print(f" LUT progress: {i}/{num_u_samples+1}")
|
||||
print("Variance LUT computed.")
|
||||
return lut
|
||||
|
||||
|
||||
@wp.kernel
|
||||
def generate_noise_kernel(
|
||||
u_image: wp.array2d(dtype=float),
|
||||
variance_lut: wp.array(dtype=float),
|
||||
noise_out: wp.array2d(dtype=float),
|
||||
mu_r: float,
|
||||
sigma_filter: float,
|
||||
seed: int,
|
||||
):
|
||||
ix, iy = wp.tid()
|
||||
height = u_image.shape[0]
|
||||
width = u_image.shape[1]
|
||||
if ix >= height or iy >= width: return
|
||||
|
||||
lut_size = variance_lut.shape[0]
|
||||
u_val = u_image[ix, iy]
|
||||
|
||||
lut_pos = u_val * float(lut_size - 1)
|
||||
lut_index0 = int(lut_pos)
|
||||
lut_index0 = wp.min(wp.max(lut_index0, 0), lut_size - 2) # Ensure lut_index0 and lut_index0+1 are valid
|
||||
lut_index1 = lut_index0 + 1
|
||||
t = lut_pos - float(lut_index0)
|
||||
if t < 0.0: t = 0.0 # Clamp t to avoid issues with precision
|
||||
if t > 1.0: t = 1.0
|
||||
|
||||
integral_val = wp.lerp(variance_lut[lut_index0], variance_lut[lut_index1], t)
|
||||
|
||||
var_bp = 0.0
|
||||
if sigma_filter > 1e-6 and mu_r > 1e-6: # mu_r check also important
|
||||
var_bp = wp.max(0.0, (mu_r * mu_r) / (2.0 * sigma_filter * sigma_filter) * integral_val)
|
||||
|
||||
std_dev = wp.sqrt(var_bp)
|
||||
state = wp.rand_init(seed, ix * width + iy + seed) # Add seed to sequence as well
|
||||
noise_sample = wp.randn(state) * std_dev
|
||||
noise_out[ix, iy] = noise_sample
|
||||
|
||||
@wp.kernel
|
||||
def convolve_2d_kernel(
|
||||
input_array: wp.array2d(dtype=float),
|
||||
kernel: wp.array(dtype=float),
|
||||
kernel_radius: int,
|
||||
output_array: wp.array2d(dtype=float),
|
||||
):
|
||||
ix, iy = wp.tid()
|
||||
height = input_array.shape[0]
|
||||
width = input_array.shape[1]
|
||||
if ix >= height or iy >= width: return
|
||||
|
||||
kernel_dim = 2 * kernel_radius + 1
|
||||
accum = float(0.0)
|
||||
|
||||
for ky_offset in range(kernel_dim):
|
||||
for kx_offset in range(kernel_dim):
|
||||
k_idx = ky_offset * kernel_dim + kx_offset
|
||||
weight = kernel[k_idx]
|
||||
|
||||
# Image coordinates to sample from
|
||||
read_row = ix + (ky_offset - kernel_radius) # Corrected: ix is row, iy is col usually
|
||||
read_col = iy + (kx_offset - kernel_radius)
|
||||
|
||||
clamped_row = wp.max(0, wp.min(read_row, height - 1))
|
||||
clamped_col = wp.max(0, wp.min(read_col, width - 1))
|
||||
|
||||
sample_val = input_array[clamped_row, clamped_col]
|
||||
accum += weight * sample_val
|
||||
output_array[ix, iy] = accum
|
||||
|
||||
@wp.kernel
|
||||
def add_rgb_noise_and_clip_kernel(
|
||||
r_in: wp.array2d(dtype=float),
|
||||
g_in: wp.array2d(dtype=float),
|
||||
b_in: wp.array2d(dtype=float),
|
||||
noise_r: wp.array2d(dtype=float),
|
||||
noise_g: wp.array2d(dtype=float),
|
||||
noise_b: wp.array2d(dtype=float),
|
||||
r_out: wp.array2d(dtype=float),
|
||||
g_out: wp.array2d(dtype=float),
|
||||
b_out: wp.array2d(dtype=float)):
|
||||
"""Adds channel-specific filtered noise to each channel and clips."""
|
||||
ix, iy = wp.tid() # type: ignore
|
||||
|
||||
height = r_in.shape[0]
|
||||
width = r_in.shape[1]
|
||||
if ix >= height or iy >= width: return
|
||||
|
||||
|
||||
r_out[ix, iy] = wp.clamp(r_in[ix, iy] + noise_r[ix, iy], 0.0, 1.0) # type: ignore
|
||||
g_out[ix, iy] = wp.clamp(g_in[ix, iy] + noise_g[ix, iy], 0.0, 1.0) # type: ignore
|
||||
b_out[ix, iy] = wp.clamp(b_in[ix, iy] + noise_b[ix, iy], 0.0, 1.0) # type: ignore
|
||||
|
||||
|
||||
def create_gaussian_kernel_2d(sigma, radius):
|
||||
kernel_size = 2 * radius + 1
|
||||
g = gaussian(kernel_size, sigma, sym=True) # Ensure symmetry for odd kernel_size
|
||||
kernel_2d = np.outer(g, g)
|
||||
sum_sq = np.sum(kernel_2d**2)
|
||||
if sum_sq > 1e-9: # Avoid division by zero if kernel is all zeros
|
||||
kernel_2d /= np.sqrt(sum_sq)
|
||||
return kernel_2d.flatten().astype(np.float32)
|
||||
|
||||
|
||||
def render_film_grain(image_path, mu_r, sigma_filter, output_path, seed=42, mono=False):
|
||||
try:
|
||||
if image_path.lower().endswith('.arw') or image_path.lower().endswith('.dng'):
|
||||
# Use rawpy for camera RAW files (ARW/DNG) so they are decoded correctly
|
||||
with rawpy.imread(image_path) as raw:
|
||||
img_np = raw.postprocess(
|
||||
use_camera_wb=True,
|
||||
no_auto_bright=True,
|
||||
output_bps=16,
|
||||
half_size=False,
|
||||
gamma=(1.0, 1.0), # No gamma correction
|
||||
)
|
||||
elif image_path.lower().endswith('.tiff') or image_path.lower().endswith('.tif') or image_path.lower().endswith('.png') or image_path.lower().endswith('.jpg') or image_path.lower().endswith('.jpeg'):
|
||||
img_np = iio.imread(image_path)
|
||||
else:
|
||||
raise ValueError("Unsupported image format. Please use TIFF, PNG, JPG, or RAW (ARW, DNG) formats.")
|
||||
except FileNotFoundError:
|
||||
print(f"Error: Input image not found at {image_path}")
|
||||
return
|
||||
except Exception as e:
|
||||
print(f"Error loading image: {e}")
|
||||
return
|
||||
|
||||
if img_np.ndim == 2:
|
||||
img_np = img_np[..., np.newaxis]
|
||||
if img_np.shape[2] == 4:
|
||||
img_np = img_np[..., :3]
|
||||
|
||||
|
||||
if img_np.dtype == np.uint8:
|
||||
img_np_float = img_np.astype(np.float32) / 255.0
|
||||
elif img_np.dtype == np.uint16:
|
||||
img_np_float = img_np.astype(np.float32) / 65535.0
|
||||
else:
|
||||
img_np_float = img_np.astype(np.float32)
|
||||
|
||||
height, width, channels = img_np_float.shape
|
||||
|
||||
print(f"Input image: {width}x{height}x{channels}")
|
||||
print(f"Parameters: μr = {mu_r}, σ_filter = {sigma_filter}")
|
||||
|
||||
# Use 256 u_samples for LUT, resulting in 257 entries (0 to 256 for u=0 to u=1)
|
||||
variance_lut_np = precompute_variance_lut(num_u_samples=256)
|
||||
variance_lut_wp = wp.array(variance_lut_np, dtype=float, device="cuda")
|
||||
|
||||
kernel_radius = max(1, int(np.ceil(3 * sigma_filter)))
|
||||
kernel_np = create_gaussian_kernel_2d(sigma_filter, kernel_radius)
|
||||
kernel_wp = wp.array(kernel_np, dtype=float, device="cuda")
|
||||
print(f"Using Gaussian filter h with sigma={sigma_filter}, radius={kernel_radius}")
|
||||
|
||||
# --- Prepare original channel data on GPU ---
|
||||
r_original_wp = wp.array(img_np_float[:, :, 0], dtype=float, device="cuda")
|
||||
if channels == 3:
|
||||
g_original_wp = wp.array(img_np_float[:, :, 1], dtype=float, device="cuda")
|
||||
b_original_wp = wp.array(img_np_float[:, :, 2], dtype=float, device="cuda")
|
||||
else: # Grayscale input
|
||||
g_original_wp = r_original_wp
|
||||
b_original_wp = r_original_wp
|
||||
|
||||
# --- Allocate noise arrays on GPU ---
|
||||
noise_r_unfiltered_wp = wp.empty_like(r_original_wp)
|
||||
noise_g_unfiltered_wp = wp.empty_like(g_original_wp)
|
||||
noise_b_unfiltered_wp = wp.empty_like(b_original_wp)
|
||||
|
||||
noise_r_filtered_wp = wp.empty_like(r_original_wp)
|
||||
noise_g_filtered_wp = wp.empty_like(g_original_wp)
|
||||
noise_b_filtered_wp = wp.empty_like(b_original_wp)
|
||||
|
||||
if mono:
|
||||
if channels == 1:
|
||||
img_gray_np = img_np_float[:, :, 0]
|
||||
else:
|
||||
# Standard RGB to Luminance weights
|
||||
img_gray_np = (0.299 * img_np_float[:, :, 0] +
|
||||
0.587 * img_np_float[:, :, 1] +
|
||||
0.114 * img_np_float[:, :, 2])
|
||||
print("Generating monochromatic noise...")
|
||||
u_gray_wp = wp.array(img_gray_np, dtype=float, device="cuda")
|
||||
noise_image_wp = wp.empty_like(u_gray_wp)
|
||||
wp.launch(kernel=generate_noise_kernel,
|
||||
dim=(height, width),
|
||||
inputs=[u_gray_wp, variance_lut_wp, noise_image_wp, mu_r, sigma_filter, seed],
|
||||
device="cuda")
|
||||
noise_filtered_wp = wp.empty_like(u_gray_wp)
|
||||
wp.launch(kernel=convolve_2d_kernel,
|
||||
dim=(height, width),
|
||||
inputs=[noise_image_wp, kernel_wp, kernel_radius, noise_filtered_wp],
|
||||
device="cuda")
|
||||
noise_r_filtered_wp.assign(noise_filtered_wp)
|
||||
noise_g_filtered_wp.assign(noise_filtered_wp)
|
||||
noise_b_filtered_wp.assign(noise_filtered_wp)
|
||||
else:
|
||||
# --- Process R Channel ---
|
||||
print("Processing R channel...")
|
||||
wp.launch(kernel=generate_noise_kernel, dim=(height, width),
|
||||
inputs=[r_original_wp, variance_lut_wp, noise_r_unfiltered_wp, mu_r, sigma_filter, seed], device="cuda")
|
||||
wp.launch(kernel=convolve_2d_kernel, dim=(height, width),
|
||||
inputs=[noise_r_unfiltered_wp, kernel_wp, kernel_radius, noise_r_filtered_wp], device="cuda")
|
||||
|
||||
if channels == 3:
|
||||
# --- Process G Channel ---
|
||||
print("Processing G channel...")
|
||||
wp.launch(kernel=generate_noise_kernel, dim=(height, width),
|
||||
inputs=[g_original_wp, variance_lut_wp, noise_g_unfiltered_wp, mu_r, sigma_filter, seed + 1], device="cuda") # Offset seed
|
||||
wp.launch(kernel=convolve_2d_kernel, dim=(height, width),
|
||||
inputs=[noise_g_unfiltered_wp, kernel_wp, kernel_radius, noise_g_filtered_wp], device="cuda")
|
||||
|
||||
# --- Process B Channel ---
|
||||
print("Processing B channel...")
|
||||
wp.launch(kernel=generate_noise_kernel, dim=(height, width),
|
||||
inputs=[b_original_wp, variance_lut_wp, noise_b_unfiltered_wp, mu_r, sigma_filter, seed + 2], device="cuda") # Offset seed
|
||||
wp.launch(kernel=convolve_2d_kernel, dim=(height, width),
|
||||
inputs=[noise_b_unfiltered_wp, kernel_wp, kernel_radius, noise_b_filtered_wp], device="cuda")
|
||||
else: # Grayscale: copy R channel's filtered noise to G and B components
|
||||
noise_g_filtered_wp.assign(noise_r_filtered_wp) # Use assign for Warp arrays
|
||||
noise_b_filtered_wp.assign(noise_r_filtered_wp)
|
||||
|
||||
|
||||
# --- Add noise and clip ---
|
||||
print("Adding noise to channels and clipping...")
|
||||
r_output_wp = wp.empty_like(r_original_wp)
|
||||
g_output_wp = wp.empty_like(g_original_wp)
|
||||
b_output_wp = wp.empty_like(b_original_wp)
|
||||
|
||||
wp.launch(kernel=add_rgb_noise_and_clip_kernel,
|
||||
dim=(height, width),
|
||||
inputs=[r_original_wp, g_original_wp, b_original_wp,
|
||||
noise_r_filtered_wp, noise_g_filtered_wp, noise_b_filtered_wp,
|
||||
r_output_wp, g_output_wp, b_output_wp],
|
||||
device="cuda")
|
||||
|
||||
# --- Copy back to host ---
|
||||
output_img_np = np.zeros((height,width,3), dtype=np.float32) # Always create 3-channel output buffer
|
||||
output_img_np[:, :, 0] = r_output_wp.numpy()
|
||||
output_img_np[:, :, 1] = g_output_wp.numpy()
|
||||
output_img_np[:, :, 2] = b_output_wp.numpy()
|
||||
|
||||
try:
|
||||
if output_path.lower().endswith('.tiff') or output_path.lower().endswith('.tif'):
|
||||
output_img_uint16 = (output_img_np * 65535.0).clip(0, 65535).astype(np.uint16)
|
||||
iio.imwrite(output_path, output_img_uint16)
|
||||
print(f"Output image saved to {output_path}")
|
||||
elif output_path.lower().endswith('.png'):
|
||||
output_img_uint8 = (output_img_np * 255.0).clip(0, 255).astype(np.uint8)
|
||||
iio.imwrite(output_path, output_img_uint8)
|
||||
print(f"Output image saved to {output_path}")
|
||||
elif output_path.lower().endswith('.jpg') or output_path.lower().endswith('.jpeg'):
|
||||
output_img_uint8 = (output_img_np * 255.0).clip(0, 255).astype(np.uint8)
|
||||
iio.imwrite(output_path, output_img_uint8, quality=95)
|
||||
print(f"Output image saved to {output_path}")
|
||||
except Exception as e:
|
||||
print(f"Error saving image: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Apply realistic film grain (Zhang et al. 2023 method)."
|
||||
)
|
||||
parser.add_argument("input_image", help="Path to the input image (TIFF, PNG, JPG, or RAW (ARW/DNG) format)")
|
||||
parser.add_argument("output_image", help="Path to save the output image (TIFF (16-bit), PNG, JPG format)")
|
||||
parser.add_argument(
|
||||
"--mu_r", type=float, default=0.1, help="Mean grain radius (relative to pixel size)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--sigma",
|
||||
type=float,
|
||||
default=0.8,
|
||||
help="Standard deviation of the Gaussian Filter for noise blurring (sigma_filter).",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--seed", type=int, default=42, help="Random seed for noise generation"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--mono", action="store_true", help="Apply monochrome film grain across channels based on luminance", default=False
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.mu_r <= 0:
|
||||
print("Warning: mu_r should be positive. Using default 0.1")
|
||||
args.mu_r = 0.1
|
||||
if args.sigma <= 0:
|
||||
print("Warning: sigma_filter should be positive. Using default 0.8")
|
||||
args.sigma = 0.8
|
||||
if args.sigma < 3 * args.mu_r:
|
||||
print(
|
||||
f"Warning: sigma_filter ({args.sigma}) is less than 3*mu_r ({3 * args.mu_r:.2f}). Approximations in the model might be less accurate."
|
||||
)
|
||||
|
||||
render_film_grain(
|
||||
args.input_image, args.mu_r, args.sigma, args.output_image, args.seed, args.mono
|
||||
)
|
469
filmscan
Executable file
@@ -0,0 +1,469 @@
|
||||
#!/usr/bin/env -S uv run --script
|
||||
# /// script
|
||||
# dependencies = [
|
||||
# "numpy",
|
||||
# "scipy",
|
||||
# "Pillow",
|
||||
# "imageio",
|
||||
# "colour-science",
|
||||
# ]
|
||||
# ///
|
||||
|
||||
import numpy as np
|
||||
import imageio.v3 as iio
|
||||
import colour
|
||||
from scipy.ndimage import uniform_filter, maximum_filter, minimum_filter
|
||||
import argparse
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
# --- Constants from negadoctor.c ---
|
||||
THRESHOLD = 2.3283064365386963e-10
|
||||
# LOG2_TO_LOG10 = 0.3010299956 # This is log10(2)
|
||||
LOG10_2 = np.log10(2.0) # More precise
|
||||
|
||||
# --- Default parameters (from dt_iop_negadoctor_params_t defaults) ---
|
||||
# These are for parameters NOT being auto-detected
|
||||
DEFAULT_WB_HIGH = np.array([1.0, 1.0, 1.0], dtype=np.float32)
|
||||
DEFAULT_WB_LOW = np.array([1.0, 1.0, 1.0], dtype=np.float32)
|
||||
DEFAULT_GAMMA = 4.0
|
||||
DEFAULT_SOFT_CLIP = 0.75
|
||||
# Film stock is implicitly color by using 3-channel Dmin etc.
|
||||
|
||||
# --- Utility Functions ---
|
||||
|
||||
def find_patch_average(image: np.ndarray, center_y: int, center_x: int, patch_size: int) -> np.ndarray:
|
||||
"""Averages pixel values in a square patch around a center point."""
|
||||
half_patch = patch_size // 2
|
||||
y_start = np.clip(center_y - half_patch, 0, image.shape[0] - 1)
|
||||
y_end = np.clip(center_y + half_patch + 1, 0, image.shape[0]) # +1 for slice
|
||||
x_start = np.clip(center_x - half_patch, 0, image.shape[1] - 1)
|
||||
x_end = np.clip(center_x + half_patch + 1, 0, image.shape[1])
|
||||
|
||||
if y_start >= y_end or x_start >= x_end: # Should not happen with proper clipping
|
||||
return image[center_y, center_x]
|
||||
|
||||
patch = image[y_start:y_end, x_start:x_end]
|
||||
return np.mean(patch, axis=(0, 1))
|
||||
|
||||
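
# Worked example (illustrative): for a 100x100 image, find_patch_average(img, 0, 0, 9)
# clips the 9x9 window at the corner and averages the surviving 5x5 region
# (rows 0..4, cols 0..4), returning one mean value per channel.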
def get_representative_patch_value(image: np.ndarray, mode: str = 'brightest',
                                   patch_size_ratio: float = 1/64, min_patch_size: int = 8,
                                   max_patch_size: int = 64) -> np.ndarray:
    """
    Finds the brightest or darkest small patch in an image.
    'Brightest' on a negative is the film base.
    'Darkest' on a negative is a highlight in the scene.
    The mode refers to the luma/intensity.
    """
    if image.ndim != 3 or image.shape[2] != 3:
        raise ValueError("Image must be an RGB image (H, W, 3)")

    patch_size = int(min(image.shape[0], image.shape[1]) * patch_size_ratio)
    patch_size = np.clip(patch_size, min_patch_size, max_patch_size)
    patch_size = max(1, patch_size // 2 * 2 + 1)  # Ensure odd for easier centering if needed

    # Work on image intensity (mean of RGB) to locate the extreme spot
    if image.shape[0] < patch_size or image.shape[1] < patch_size:
        # Image too small for the patch logic: find the single extreme pixel and sample around it
        intensity_map = np.mean(image, axis=2)
        if mode == 'brightest':
            center_y, center_x = np.unravel_index(np.argmax(intensity_map), intensity_map.shape)
        else:  # darkest
            center_y, center_x = np.unravel_index(np.argmin(intensity_map), intensity_map.shape)
        return find_patch_average(image, center_y, center_x, patch_size)

    # For larger images, a locally averaged (filtered) map or multiple candidate patches
    # would be more robust; for simplicity, find the global min/max pixel and average a
    # small patch around it.
    intensity_map = np.mean(image, axis=2)  # Luminance proxy

    if mode == 'brightest':
        flat_idx = np.argmax(intensity_map)  # Find the brightest pixel
    else:  # darkest
        flat_idx = np.argmin(intensity_map)  # Find the darkest pixel

    center_y, center_x = np.unravel_index(flat_idx, intensity_map.shape)

    # Refine patch location: average a small area around the found extreme pixel
    # to reduce noise sensitivity.
    avg_patch_size = max(3, patch_size // 4)  # A smaller patch for local averaging

    return find_patch_average(image, center_y, center_x, avg_patch_size)
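
# Sizing example (illustrative): for a 3000x2000 image with the defaults above,
# patch_size = int(2000 * 1/64) = 31, clipped to [8, 64] and forced odd -> 31,
# and the refinement patch is max(3, 31 // 4) = 7 pixels across.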
# --- Automated Parameter Calculation Functions ---
# These functions sample the original input image (img_aces_negative),
# as implied by the C GUI code `self->picked_color...`


def auto_calculate_dmin(img_aces_negative: np.ndarray, **kwargs) -> np.ndarray:
    """Dmin is the color of the film base (brightest part of the negative)."""
    # In the C code, Dmin values are typically > 0, often around [1.0, 0.45, 0.25] for color.
    # These are divisors, so they should not be zero.
    # The input image here is 0-1, so a very bright film base might be e.g. 0.8, 0.7, 0.6.
    # The C defaults ("Dmin[0] = 1.00f; Dmin[1] = 0.45f; Dmin[2] = 0.25f;") suggest a different
    # scale, but `apply_auto_Dmin` simply sets `p->Dmin[k] = self->picked_color[k]`; if
    # `picked_color` comes from a 0-1 image, Dmin is also 0-1, so the input image is used directly.
    dmin = get_representative_patch_value(img_aces_negative, mode='brightest', **kwargs)
    return np.maximum(dmin, THRESHOLD)  # Ensure Dmin is not too small
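
# Illustrative only: for a 0-1 normalized scan of colour negative stock, the orange film base
# typically gives a Dmin with the red channel highest (something on the order of [0.7, 0.5, 0.35]);
# the exact values depend on the stock and on the scan exposure.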
def auto_calculate_dmax(img_aces_negative: np.ndarray, dmin_param: np.ndarray, **kwargs) -> float:
    """
    D_max = v_maxf(log10f(p->Dmin[c] / fmaxf(self->picked_color_min[c], THRESHOLD)))
    picked_color_min is the darkest patch on the negative (scene highlight).
    """
    darkest_negative_patch = get_representative_patch_value(img_aces_negative, mode='darkest', **kwargs)
    darkest_negative_patch = np.maximum(darkest_negative_patch, THRESHOLD)

    # Ensure dmin_param and darkest_negative_patch are broadcastable if dmin_param is scalar
    dmin_param_rgb = np.array(dmin_param, dtype=np.float32)
    if dmin_param_rgb.ndim == 0:
        dmin_param_rgb = np.full(3, dmin_param_rgb, dtype=np.float32)

    log_arg = dmin_param_rgb / darkest_negative_patch
    rgb_dmax_contrib = np.log10(log_arg)
    d_max = np.max(rgb_dmax_contrib)
    return max(d_max, 0.1)  # D_max must be positive
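
# Worked example (illustrative numbers): with Dmin ~= 0.80 and a darkest patch of ~0.02
# in the same channel, that channel contributes log10(0.80 / 0.02) = log10(40) ~= 1.60,
# and D_max is the largest such value across R, G and B.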
def auto_calculate_offset(img_aces_negative: np.ndarray, dmin_param: np.ndarray, d_max_param: float, **kwargs) -> float:
    """
    p->offset = v_minf(log10f(p->Dmin[c] / fmaxf(self->picked_color_max[c], THRESHOLD)) / p->D_max)
    `picked_color_max` is the brightest patch on the negative. Assuming Dmin was sampled from
    the film base (the unexposed or lightest area), this patch is the brightest *actual image
    content* on the negative, which becomes the deepest shadow in the positive. If there is no
    unexposed edge and Dmin itself came from the brightest image area, the two samples coincide
    and the estimate becomes somewhat circular. In either case, 'brightest' mode of
    `get_representative_patch_value` is used on the negative.
    """
    brightest_negative_patch = get_representative_patch_value(img_aces_negative, mode='brightest', **kwargs)
    brightest_negative_patch = np.maximum(brightest_negative_patch, THRESHOLD)

    dmin_param_rgb = np.array(dmin_param, dtype=np.float32)
    if dmin_param_rgb.ndim == 0:
        dmin_param_rgb = np.full(3, dmin_param_rgb, dtype=np.float32)

    log_arg = dmin_param_rgb / brightest_negative_patch
    rgb_offset_contrib = np.log10(log_arg) / d_max_param
    offset = np.min(rgb_offset_contrib)
    return offset  # Can be negative
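
# Worked example (illustrative numbers): with Dmin ~= 0.80, a brightest image patch of ~0.75
# and D_max ~= 1.60, that channel gives log10(0.80 / 0.75) / 1.60 ~= 0.028 / 1.60 ~= 0.018;
# the offset is the smallest such value across channels (and may be negative).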
def auto_calculate_paper_black(img_aces_negative: np.ndarray, dmin_param: np.ndarray, d_max_param: float,
                               offset_param: float, wb_high_param: np.ndarray, wb_low_param: np.ndarray,
                               **kwargs) -> float:
    """
    Auto paper black, following the C `apply_auto_black`:
        RGB_log_density[c] = -log10f(p->Dmin[c] / fmaxf(self->picked_color_max[c], THRESHOLD))
        RGB_corrected[c]   = (p->wb_high[c] / p->D_max) * RGB_log_density[c]
                             + (p->wb_high[c] * p->offset * p->wb_low[c])
        RGB[c]             = 0.1f - (1.0f - fast_exp10f(RGB_corrected[c]))
        p->black           = v_maxf(RGB)
    The offset term uses p->wb_high[c] * p->offset * p->wb_low[c], i.e. the same structure as
    d->offset in commit_params, so this auto formula stays consistent with negadoctor_process().
    `picked_color_max` is the brightest patch on the negative.
    """
    brightest_negative_patch = get_representative_patch_value(img_aces_negative, mode='brightest', **kwargs)
    brightest_negative_patch = np.maximum(brightest_negative_patch, THRESHOLD)

    dmin_param_rgb = np.array(dmin_param, dtype=np.float32)
    if dmin_param_rgb.ndim == 0:
        dmin_param_rgb = np.full(3, dmin_param_rgb, dtype=np.float32)

    log_density_term = -np.log10(dmin_param_rgb / brightest_negative_patch)

    # This is `corrected_de` for the brightest_negative_patch
    density_corrected_term = (wb_high_param / d_max_param) * log_density_term + \
                             (wb_high_param * offset_param * wb_low_param)  # matches the d->offset structure

    val_for_black_calc = 0.1 - (1.0 - np.power(10.0, density_corrected_term))
    paper_black = np.max(val_for_black_calc)
    return paper_black
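
# Observation (derivation, not from the C source): with unity white balance and the same
# brightest patch used by auto_calculate_offset(), the corrected density term cancels to 0
# for the channel that set the offset, so that channel evaluates to 0.1 - (1 - 10**0) = 0.1
# and the other channels come out <= 0.1; the auto paper black is then exactly 0.1.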
def auto_calculate_print_exposure(img_aces_negative: np.ndarray, dmin_param: np.ndarray, d_max_param: float,
                                  offset_param: float, paper_black_param: float,
                                  wb_high_param: np.ndarray, wb_low_param: np.ndarray,
                                  **kwargs) -> float:
    """
    Auto print exposure, following the C `apply_auto_exposure`:
        RGB_log_density[c] = -log10f(p->Dmin[c] / fmaxf(self->picked_color_min[c], THRESHOLD))
        RGB_corrected[c]   = (p->wb_high[c] / p->D_max) * RGB_log_density[c]
                             + (p->wb_high[c] * p->offset * p->wb_low[c])
        RGB[c]             = 0.96f / ((1.0f - fast_exp10f(RGB_corrected[c])) + p->black)
        p->exposure        = v_minf(RGB)
    The offset term is corrected the same way as in auto_calculate_paper_black().
    `picked_color_min` is the darkest patch on the negative.
    """
    darkest_negative_patch = get_representative_patch_value(img_aces_negative, mode='darkest', **kwargs)
    darkest_negative_patch = np.maximum(darkest_negative_patch, THRESHOLD)

    dmin_param_rgb = np.array(dmin_param, dtype=np.float32)
    if dmin_param_rgb.ndim == 0:
        dmin_param_rgb = np.full(3, dmin_param_rgb, dtype=np.float32)

    log_density_term = -np.log10(dmin_param_rgb / darkest_negative_patch)

    density_corrected_term = (wb_high_param / d_max_param) * log_density_term + \
                             (wb_high_param * offset_param * wb_low_param)

    denominator = (1.0 - np.power(10.0, density_corrected_term)) + paper_black_param
    # Avoid division by zero or very small numbers if the denominator is problematic
    denominator = np.where(np.abs(denominator) < THRESHOLD, np.sign(denominator + THRESHOLD) * THRESHOLD, denominator)

    val_for_exposure_calc = 0.96 / denominator
    print_exposure = np.min(val_for_exposure_calc)
    return max(print_exposure, 0.01)  # Ensure exposure is positive
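
# Observation (derivation): choosing exposure this way makes print_linear in
# negadoctor_process() come out at exactly 0.96 for the darkest negative patch (the brightest
# scene highlight), i.e. the highlight lands just under paper white before paper gamma and
# the soft clip are applied.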
# --- Core Negadoctor Process ---
def negadoctor_process(img_aces_negative: np.ndarray,
                       dmin_param: np.ndarray,
                       wb_high_param: np.ndarray,
                       wb_low_param: np.ndarray,
                       d_max_param: float,
                       offset_param: float,         # scan exposure bias
                       paper_black_param: float,    # paper black (density correction)
                       gamma_param: float,          # paper grade (gamma)
                       soft_clip_param: float,      # paper gloss (specular highlights)
                       print_exposure_param: float  # print exposure adjustment
                       ) -> np.ndarray:
    """
    Applies the negadoctor calculations based on `_process_pixel` and `commit_params`.
    Input image and Dmin are expected to be in a compatible range (e.g., 0-1).
    """
    # Ensure params are numpy arrays for broadcasting
    dmin_param = np.array(dmin_param, dtype=np.float32).reshape(1, 1, 3)
    wb_high_param = np.array(wb_high_param, dtype=np.float32).reshape(1, 1, 3)
    wb_low_param = np.array(wb_low_param, dtype=np.float32).reshape(1, 1, 3)

    # From commit_params:
    # d->wb_high[c] = p->wb_high[c] / p->D_max;
    effective_wb_high = wb_high_param / d_max_param

    # d->offset[c] = p->wb_high[c] * p->offset * p->wb_low[c];
    # Note: p->offset is the scalar offset_param
    effective_offset = wb_high_param * offset_param * wb_low_param

    # d->black = -p->exposure * (1.0f + p->black);
    # Note: p->exposure is scalar print_exposure_param, p->black is scalar paper_black_param
    effective_paper_black = -print_exposure_param * (1.0 + paper_black_param)

    # d->soft_clip_comp = 1.0f - p->soft_clip;
    soft_clip_comp = 1.0 - soft_clip_param

    # --- _process_pixel logic ---
    # 1. Convert transmission to density using Dmin as a fulcrum
    #    density[c] = Dmin[c] / clamped[c];
    #    log_density[c] = log2(density[c]) * -LOG2_TO_LOG10 = -log10(density[c])
    clamped_input = np.maximum(img_aces_negative, THRESHOLD)
    density = dmin_param / clamped_input
    log_density = -np.log10(density)  # This is log10(clamped_input / dmin_param)

    # 2. Correct density in log space
    #    corrected_de[c] = effective_wb_high[c] * log_density[c] + effective_offset[c];
    corrected_density = effective_wb_high * log_density + effective_offset

    # 3. Print density on paper
    #    print_linear[c] = -(exposure[c] * ten_to_x[c] + black[c]);
    #    exposure[c] is print_exposure_param, black[c] is effective_paper_black,
    #    ten_to_x is 10^corrected_density
    ten_to_corrected_density = np.power(10.0, corrected_density)
    print_linear = -(print_exposure_param * ten_to_corrected_density + effective_paper_black)
    print_linear = np.maximum(print_linear, 0.0)

    # 4. Apply paper grade (gamma)
    #    print_gamma = print_linear ^ gamma_param
    print_gamma = np.power(print_linear, gamma_param)

    # 5. Compress highlights (soft clip)
    #    pix_out[c] = (print_gamma[c] > soft_clip[c])
    #               ? soft_clip[c] + (1.0f - e_to_gamma[c]) * soft_clip_comp[c]
    #               : print_gamma[c];
    #    where e_to_gamma[c] = exp(-(print_gamma[c] - soft_clip[c]) / soft_clip_comp[c])

    # Avoid issues with soft_clip_comp being zero if soft_clip_param is 1.0
    if np.isclose(soft_clip_comp, 0.0):
        output_pixels = np.where(print_gamma > soft_clip_param, soft_clip_param, print_gamma)
    else:
        exponent = -(print_gamma - soft_clip_param) / soft_clip_comp
        # Clip exponent to avoid overflow in np.exp for very large print_gamma values
        exponent = np.clip(exponent, -700, 700)  # exp(-709) is ~0, exp(709) is ~inf
        e_to_gamma = np.exp(exponent)

        compressed_highlights = soft_clip_param + (1.0 - e_to_gamma) * soft_clip_comp
        output_pixels = np.where(print_gamma > soft_clip_param, compressed_highlights, print_gamma)

    return np.clip(output_pixels, 0.0, 1.0)  # Final clip to 0-1 range
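
# Minimal usage sketch (illustrative parameter values, not measured from a real scan):
#   neg = np.full((4, 4, 3), 0.5, dtype=np.float32)          # stand-in "negative"
#   pos = negadoctor_process(neg,
#                            dmin_param=np.array([0.8, 0.7, 0.6], dtype=np.float32),
#                            wb_high_param=DEFAULT_WB_HIGH, wb_low_param=DEFAULT_WB_LOW,
#                            d_max_param=1.6, offset_param=0.02, paper_black_param=0.05,
#                            gamma_param=DEFAULT_GAMMA, soft_clip_param=DEFAULT_SOFT_CLIP,
#                            print_exposure_param=1.5)
#   # pos is a float32 array in [0, 1], ready for conversion to the output colourspace.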
# --- Main Execution ---
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Python implementation of Darktable's Negadoctor module.")
    parser.add_argument("input_file", help="Path to the input negative image (TIFF).")
    parser.add_argument("output_file", help="Path to save the processed positive image.")
    parser.add_argument("--patch_size_ratio", type=float, default=1/128, help="Ratio of image min dim for patch size in auto-detection.")  # smaller default
    parser.add_argument("--min_patch_size", type=int, default=8, help="Minimum patch size in pixels.")
    parser.add_argument("--max_patch_size", type=int, default=32, help="Maximum patch size in pixels.")  # smaller default
    parser.add_argument("--aces_transform", default="ACEScg", help="ACES working space (e.g., ACEScg, ACEScc, ACEScct).")
    parser.add_argument("--output_colorspace", default="sRGB", help="Colorspace for output image (e.g., sRGB, Display P3).")

    args = parser.parse_args()

    print(f"Loading image: {args.input_file}")
    try:
        img_raw = iio.imread(args.input_file)
    except Exception as e:
        print(f"Error loading image: {e}")
        exit(1)

    print(f"Original image dtype: {img_raw.dtype}, shape: {img_raw.shape}, min: {img_raw.min()}, max: {img_raw.max()}")

    if img_raw.dtype == np.uint16:
        img_float = img_raw.astype(np.float32) / 65535.0
    elif img_raw.dtype == np.uint8:
        img_float = img_raw.astype(np.float32) / 255.0
    elif img_raw.dtype == np.float32 or img_raw.dtype == np.float64:
        if img_raw.max() > 1.01:  # Check if it's not already 0-1
            print("Warning: Input float image max > 1.0. Assuming it's not normalized. Clamping and scaling may occur.")
            img_float = np.clip(img_raw, 0, None).astype(np.float32)  # Needs proper scaling if not 0-1
            if img_float.max() > 1.0:  # if it's still high, e.g. integer-range float
                if np.percentile(img_float, 99.9) < 256:  # likely 8-bit range
                    img_float /= 255.0
                elif np.percentile(img_float, 99.9) < 65536:  # likely 16-bit range
                    img_float /= 65535.0
                else:  # unknown large float range
                    print("Warning: Unknown float range. Trying to normalize by max value.")
                    img_float /= img_float.max()
        else:
            img_float = img_raw.astype(np.float32)
    else:
        raise ValueError(f"Unsupported image dtype: {img_raw.dtype}")

    img_float = np.clip(img_float, 0.0, 1.0)
    if img_float.ndim == 2:  # Grayscale
        img_float = np.stack([img_float] * 3, axis=-1)  # make 3-channel

    # Assuming input TIFF is sRGB encoded (common for scans unless specified)
    # Convert to linear sRGB first, then to ACEScg
    print("Converting to ACEScg...")
    # img_linear_srgb = colour.gamma_correct(img_float, 1/2.2, 'ITU-R BT.709')  # Approximate sRGB EOTF decoding
    img_linear_srgb = colour.models.eotf_sRGB(img_float)  # More accurate sRGB EOTF decoding
    img_acescg = colour.RGB_to_RGB(img_linear_srgb,
                                   colour.models.RGB_COLOURSPACE_sRGB,
                                   colour.models.RGB_COLOURSPACE_ACESCG)
    img_acescg = np.clip(img_acescg, 0.0, None)  # ACEScg can have values > 1.0 for very bright sources

    print(f"Image in ACEScg: shape: {img_acescg.shape}, min: {img_acescg.min():.4f}, max: {img_acescg.max():.4f}, mean: {img_acescg.mean():.4f}")

    # Automated parameter detection
    patch_kwargs = {
        "patch_size_ratio": args.patch_size_ratio,
        "min_patch_size": args.min_patch_size,
        "max_patch_size": args.max_patch_size
    }

    print("Auto-detecting parameters...")
    param_dmin = auto_calculate_dmin(img_acescg, **patch_kwargs)
    print(f" Dmin: {param_dmin}")

    param_d_max = auto_calculate_dmax(img_acescg, param_dmin, **patch_kwargs)
    print(f" D_max: {param_d_max:.4f}")

    param_offset = auto_calculate_offset(img_acescg, param_dmin, param_d_max, **patch_kwargs)
    print(f" Offset (Scan Bias): {param_offset:.4f}")

    param_paper_black = auto_calculate_paper_black(img_acescg, param_dmin, param_d_max, param_offset,
                                                   DEFAULT_WB_HIGH, DEFAULT_WB_LOW, **patch_kwargs)
    print(f" Paper Black: {param_paper_black:.4f}")

    param_print_exposure = auto_calculate_print_exposure(img_acescg, param_dmin, param_d_max, param_offset,
                                                         param_paper_black, DEFAULT_WB_HIGH, DEFAULT_WB_LOW,
                                                         **patch_kwargs)
    print(f" Print Exposure: {param_print_exposure:.4f}")

    # Perform Negadoctor processing
    print("Applying Negadoctor process...")
    img_processed_acescg = negadoctor_process(
        img_acescg,
        dmin_param=param_dmin,
        wb_high_param=DEFAULT_WB_HIGH,
        wb_low_param=DEFAULT_WB_LOW,
        d_max_param=param_d_max,
        offset_param=param_offset,
        paper_black_param=param_paper_black,
        gamma_param=DEFAULT_GAMMA,
        soft_clip_param=DEFAULT_SOFT_CLIP,
        print_exposure_param=param_print_exposure
    )
    print(f"Processed (ACEScg): min: {img_processed_acescg.min():.4f}, max: {img_processed_acescg.max():.4f}, mean: {img_processed_acescg.mean():.4f}")

    # Convert back to output colorspace (e.g., sRGB)
    print(f"Converting from ACEScg to {args.output_colorspace}...")
    if args.output_colorspace.upper() == 'SRGB':
        output_cs = colour.models.RGB_COLOURSPACE_sRGB
        img_out_linear = colour.RGB_to_RGB(img_processed_acescg,
                                           colour.models.RGB_COLOURSPACE_ACESCG,
                                           output_cs)
        img_out_linear = np.clip(img_out_linear, 0.0, 1.0)  # Clip before gamma correction
        # img_out_gamma = colour.models.oetf_sRGB(img_out_linear)  # Accurate sRGB OETF
        img_out_gamma = colour.models.eotf_inverse_sRGB(img_out_linear)  # Accurate sRGB OETF
    elif args.output_colorspace.upper() == 'DISPLAY P3':
        output_cs = colour.models.RGB_COLOURSPACE_DISPLAY_P3
        img_out_linear = colour.RGB_to_RGB(img_processed_acescg,
                                           colour.models.RGB_COLOURSPACE_ACESCG,
                                           output_cs)
        img_out_linear = np.clip(img_out_linear, 0.0, 1.0)
        img_out_gamma = colour.models.eotf_inverse_sRGB(img_out_linear)  # Display P3 uses the sRGB transfer function
    else:
        print(f"Warning: Unsupported output colorspace {args.output_colorspace}. Defaulting to sRGB.")
        output_cs = colour.models.RGB_COLOURSPACE_sRGB
        img_out_linear = colour.RGB_to_RGB(img_processed_acescg,
                                           colour.models.RGB_COLOURSPACE_ACESCG,
                                           output_cs)
        img_out_linear = np.clip(img_out_linear, 0.0, 1.0)
        img_out_gamma = colour.models.eotf_inverse_sRGB(img_out_linear)

    img_out_final = np.clip(img_out_gamma, 0.0, 1.0)
    print(f"Final output image: min: {img_out_final.min():.4f}, max: {img_out_final.max():.4f}, mean: {img_out_final.mean():.4f}")

    # Save the image
    output_path = Path(args.output_file)
    if output_path.suffix.lower() in ['.tif', '.tiff']:
        img_to_save = (img_out_final * 65535.0).astype(np.uint16)
        print(f"Saving 16-bit TIFF: {args.output_file}")
    else:
        img_to_save = (img_out_final * 255.0).astype(np.uint8)
        print(f"Saving 8-bit image (e.g. PNG/JPG): {args.output_file}")

    try:
        iio.imwrite(args.output_file, img_to_save)
        print("Processing complete.")
    except Exception as e:
        print(f"Error saving image: {e}")
        exit(1)
6
main.py
Normal file
@ -0,0 +1,6 @@
def main():
    print("Hello from filmsim!")


if __name__ == "__main__":
    main()
17
pyproject.toml
Normal file
@ -0,0 +1,17 @@
[project]
name = "filmsim"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.13"
dependencies = [
    "colour-science>=0.4.6",
    "imageio>=2.37.0",
    "jupyter>=1.1.1",
    "jupyterlab>=4.4.3",
    "numpy>=2.2.6",
    "pillow>=11.2.1",
    "rawpy>=0.25.0",
    "scipy>=1.15.3",
    "warp-lang>=1.7.2",
]
140
sim_data/portra_400.json
Normal file
@ -0,0 +1,140 @@
{
  "info": {
    "name": "Portra 400",
    "description": "KODAK PROFESSIONAL PORTRA 400 is the world's finest grain high-speed color negative film. At true ISO 400 speed, this film delivers spectacular skin tones plus exceptional color saturation over a wide range of lighting conditions. PORTRA 400 Film is the ideal choice for portrait and fashion photography, as well as for nature, travel and outdoor photography, where the action is fast or the lighting can't be controlled.",
    "format_mm": 35,
    "version": "1.0.0"
  },
  "processing": {
    "gamma": {
      "r_factor": 1.0,
      "g_factor": 1.0,
      "b_factor": 1.0
    },
    "balance": {
      "r_shift": 0.0,
      "g_shift": 0.0,
      "b_shift": 0.0
    }
  },
  "properties": {
    "calibration": {
      "iso": 400,
      "middle_gray_logh": -1.44
    },
    "halation": {
      "strength": {
        "r": 0.015,
        "g": 0.007,
        "b": 0.002
      },
      "size_um": {
        "r": 200.0,
        "g": 100.0,
        "b": 50.0
      }
    },
    "couplers": {
      "amount": 1.0,
      "diffusion_um": 5.0
    },
    "interlayer": {
      "diffusion_um": 2.1
    },
    "curves": {
      "hd": [
        { "d": -3.4, "r": 0.213, "g": 0.6402, "b": 0.8619 },
        { "d": -3.3, "r": 0.2156, "g": 0.6441, "b": 0.8616 },
        { "d": -3.2, "r": 0.2182, "g": 0.6482, "b": 0.867 },
        { "d": -3.1, "r": 0.2219, "g": 0.6524, "b": 0.8749 },
        { "d": -3, "r": 0.2263, "g": 0.656, "b": 0.8852 },
        { "d": -2.9, "r": 0.2307, "g": 0.6589, "b": 0.9079 },
        { "d": -2.8, "r": 0.2455, "g": 0.677, "b": 0.9413 },
        { "d": -2.7, "r": 0.2653, "g": 0.702, "b": 0.9823 },
        { "d": -2.6, "r": 0.3005, "g": 0.735, "b": 1.0363 },
        { "d": -2.5, "r": 0.3373, "g": 0.7768, "b": 1.0943 },
        { "d": -2.4, "r": 0.3848, "g": 0.8275, "b": 1.1578 },
        { "d": -2.3, "r": 0.4354, "g": 0.879, "b": 1.2213 },
        { "d": -2.2, "r": 0.4885, "g": 0.9338, "b": 1.2848 },
        { "d": -2.1, "r": 0.5424, "g": 0.9885, "b": 1.3482 },
        { "d": -2, "r": 0.597, "g": 1.0433, "b": 1.4117 },
        { "d": -1.9, "r": 0.6516, "g": 1.098, "b": 1.4752 },
        { "d": -1.8, "r": 0.7062, "g": 1.1527, "b": 1.5387 },
        { "d": -1.7, "r": 0.7608, "g": 1.2075, "b": 1.6021 },
        { "d": -1.6, "r": 0.8154, "g": 1.2622, "b": 1.6656 },
        { "d": -1.5, "r": 0.87, "g": 1.317, "b": 1.7291 },
        { "d": -1.4, "r": 0.9246, "g": 1.3717, "b": 1.7926 },
        { "d": -1.3, "r": 0.9792, "g": 1.4264, "b": 1.856 },
        { "d": -1.2, "r": 1.0338, "g": 1.4812, "b": 1.9195 },
        { "d": -1.1, "r": 1.0883, "g": 1.5359, "b": 1.983 },
        { "d": -1, "r": 1.1429, "g": 1.5907, "b": 2.0465 },
        { "d": -0.9, "r": 1.1975, "g": 1.6454, "b": 2.1099 },
        { "d": -0.8, "r": 1.2521, "g": 1.7002, "b": 2.1734 },
        { "d": -0.7, "r": 1.3067, "g": 1.7549, "b": 2.2369 },
        { "d": -0.6, "r": 1.3613, "g": 1.8096, "b": 2.3004 },
        { "d": -0.5, "r": 1.4159, "g": 1.8644, "b": 2.3638 },
        { "d": -0.4, "r": 1.4705, "g": 1.9191, "b": 2.4273 },
        { "d": -0.3, "r": 1.5251, "g": 1.9739, "b": 2.4908 },
        { "d": -0.2, "r": 1.5797, "g": 2.0286, "b": 2.5543 },
        { "d": -0.1, "r": 1.6343, "g": 2.0834, "b": 2.6177 },
        { "d": 0, "r": 1.6889, "g": 2.1381, "b": 2.6812 },
        { "d": 0.1, "r": 1.7435, "g": 2.1928, "b": 2.7447 },
        { "d": 0.2, "r": 1.7981, "g": 2.2476, "b": 2.8082 },
        { "d": 0.3, "r": 1.8527, "g": 2.3023, "b": 2.8716 },
        { "d": 0.4, "r": 1.9073, "g": 2.3571, "b": 2.9351 },
        { "d": 0.5, "r": 1.9619, "g": 2.4118, "b": 2.9986 }
      ],
      "spectral_sensitivity": [
        { "wavelength": 379.664, "y": 1.715, "m": 0.00, "c": 0.00 },
        { "wavelength": 385.87, "y": 2.019, "m": 0.00, "c": 0.00 },
        { "wavelength": 392.077, "y": 2.294, "m": 1.311, "c": 0.00 },
        { "wavelength": 398.283, "y": 2.51, "m": 1.468, "c": 0.00 },
        { "wavelength": 404.489, "y": 2.589, "m": 1.566, "c": 0.00 },
        { "wavelength": 410.695, "y": 2.579, "m": 1.527, "c": 0.00 },
        { "wavelength": 416.901, "y": 2.53, "m": 1.468, "c": 0.00 },
        { "wavelength": 423.108, "y": 2.549, "m": 1.409, "c": 0.00 },
        { "wavelength": 429.314, "y": 2.549, "m": 1.359, "c": 0.00 },
        { "wavelength": 435.52, "y": 2.539, "m": 1.33, "c": 0.00 },
        { "wavelength": 441.726, "y": 2.529, "m": 1.31, "c": 0.00 },
        { "wavelength": 447.933, "y": 2.51, "m": 1.3, "c": 0.00 },
        { "wavelength": 454.139, "y": 2.5, "m": 1.31, "c": 0.00 },
        { "wavelength": 460.345, "y": 2.51, "m": 1.32, "c": 0.00 },
        { "wavelength": 466.551, "y": 2.569, "m": 1.33, "c": 0.00 },
        { "wavelength": 472.757, "y": 2.539, "m": 1.408, "c": 0.00 },
        { "wavelength": 478.964, "y": 2.358, "m": 1.585, "c": 0.00 },
        { "wavelength": 485.17, "y": 2.038, "m": 1.723, "c": 0.00 },
        { "wavelength": 491.376, "y": 1.596, "m": 1.88, "c": 0.399 },
        { "wavelength": 497.582, "y": 1.288, "m": 1.988, "c": 0.47 },
        { "wavelength": 503.788, "y": 1.095, "m": 2.037, "c": 0.549 },
        { "wavelength": 509.995, "y": 0.81, "m": 2.086, "c": 0.644 },
        { "wavelength": 516.201, "y": 0.486, "m": 2.135, "c": 0.706 },
        { "wavelength": 522.407, "y": 0.00, "m": 2.194, "c": 0.765 },
        { "wavelength": 528.613, "y": 0.00, "m": 2.253, "c": 0.804 },
        { "wavelength": 534.82, "y": 0.00, "m": 2.361, "c": 0.804 },
        { "wavelength": 541.026, "y": 0.00, "m": 2.43, "c": 0.775 },
        { "wavelength": 547.232, "y": 0.00, "m": 2.488, "c": 0.774 },
        { "wavelength": 553.438, "y": 0.00, "m": 2.518, "c": 0.833 },
        { "wavelength": 559.644, "y": 0.00, "m": 2.479, "c": 0.981 },
        { "wavelength": 565.851, "y": 0.00, "m": 2.4, "c": 1.138 },
        { "wavelength": 572.057, "y": 0.00, "m": 2.311, "c": 1.315 },
        { "wavelength": 578.263, "y": 0.00, "m": 2.213, "c": 1.599 },
        { "wavelength": 584.469, "y": 0.00, "m": 1.854, "c": 1.796 },
        { "wavelength": 590.675, "y": 0.00, "m": 1.504, "c": 1.982 },
        { "wavelength": 596.882, "y": 0.00, "m": 1.113, "c": 2.09 },
        { "wavelength": 603.088, "y": 0.00, "m": 0.00, "c": 2.159 },
        { "wavelength": 609.294, "y": 0.00, "m": 0.00, "c": 2.238 },
        { "wavelength": 615.5, "y": 0.00, "m": 0.00, "c": 2.297 },
        { "wavelength": 621.707, "y": 0.00, "m": 0.00, "c": 2.355 },
        { "wavelength": 627.913, "y": 0.00, "m": 0.00, "c": 2.385 },
        { "wavelength": 634.119, "y": 0.00, "m": 0.00, "c": 2.385 },
        { "wavelength": 640.325, "y": 0.00, "m": 0.00, "c": 2.414 },
        { "wavelength": 646.531, "y": 0.00, "m": 0.00, "c": 2.522 },
        { "wavelength": 652.738, "y": 0.00, "m": 0.00, "c": 2.601 },
        { "wavelength": 658.944, "y": 0.00, "m": 0.00, "c": 2.571 },
        { "wavelength": 671.356, "y": 0.00, "m": 0.00, "c": 1.805 },
        { "wavelength": 677.562, "y": 0.00, "m": 0.00, "c": 1.132 },
        { "wavelength": 683.769, "y": 0.00, "m": 0.00, "c": 0.744 }
      ]
    }
  }
}
BIN  test_images/Baseline JPEG/01.JPG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/Baseline JPEG/02.JPG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/Baseline JPEG/03.JPG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/Baseline JPEG/04.JPG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/Baseline JPEG/05.JPG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/Baseline JPEG/06.JPG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/Baseline JPEG/07.JPG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/Baseline JPEG/08.JPG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/Baseline JPEG/09.JPG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/Baseline JPEG/10.JPG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/RAW/01.DNG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/RAW/02.DNG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/RAW/03.DNG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/RAW/04.DNG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/RAW/05.DNG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/RAW/06.DNG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/RAW/07.DNG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/RAW/08.DNG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/RAW/09.DNG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/RAW/10.DNG  (Stored with Git LFS)  Executable file  Binary file not shown.
BIN  test_images/v1.0output/filmcolor/01.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmcolor/02.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmcolor/03.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmcolor/04.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmcolor/05.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmcolor/06.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmcolor/07.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmcolor/08.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmcolor/09.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmcolor/10.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainmono/01.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainmono/02.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainmono/03.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainmono/04.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainmono/05.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainmono/06.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainmono/07.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainmono/08.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainmono/09.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainmono/10.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainrgb/01.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainrgb/02.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainrgb/03.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainrgb/04.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainrgb/05.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainrgb/06.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainrgb/07.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainrgb/08.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainrgb/09.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmgrainrgb/10.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmscan/01.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmscan/02.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmscan/03.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmscan/04.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmscan/05.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmscan/06.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmscan/07.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmscan/08.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmscan/09.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.
BIN  test_images/v1.0output/filmscan/10.DNG.tiff.jpg  (Stored with Git LFS)  Normal file  Binary file not shown.