tedit/llm.md
2025-04-13 14:52:23 -04:00

173 KiB

main.cpp

// Dear ImGui: standalone example application for SDL2 + OpenGL
// (SDL is a cross-platform general purpose library for handling windows, inputs, OpenGL/Vulkan/Metal graphics context creation, etc.)

// Learn about Dear ImGui:
// - FAQ                  https://dearimgui.com/faq
// - Getting Started      https://dearimgui.com/getting-started
// - Documentation        https://dearimgui.com/docs (same as your local docs/ folder).
// - Introduction, links and more at the top of imgui.cpp

#define IMGUI_DEFINE_MATH_OPERATORS
#include <GL/glew.h>
#include "imgui.h"
#include "imgui_impl_sdl2.h"
#include "imgui_impl_opengl3.h"
#include <stdio.h>
#include <SDL.h>
#if defined(IMGUI_IMPL_OPENGL_ES2)
#include <SDL_opengles2.h>
#else
#include <SDL_opengl.h>
#include <GL/gl.h>
#endif

// This example can also compile and run with Emscripten! See 'Makefile.emscripten' for details.
#ifdef __EMSCRIPTEN__
#include "../libs/emscripten/emscripten_mainloop_stub.h"
#endif

#include "exif.h"

#define APP_IMAGE_IMPLEMENTATION
#define IMGUI_IMAGE_VIEWER_IMPLEMENTATION

#include "app_image.h"
#include "tex_inspect_opengl.h"
#include "imgui_tex_inspect.h"
#include "shaderutils.h"

static float exposure = 0.0f;
static float contrast = 0.0f;
static float highlights = 0.0f;
static float shadows = 0.0f;
static float whites = 0.0f;
static float blacks = 0.0f;
static float temperature = 6500.0f; // Example starting point (Kelvin)
static float tint = 0.0f;
static float vibrance = 0.0f;
static float saturation = 0.0f;
static float clarity = 0.0f;
static float texture = 0.0f;
static float dehaze = 0.0f;

#include <string>
#include <vector>
#include <map>
#include <functional> // For std::function
#include <memory>     // For unique_ptr

#include "imfilebrowser.h" // <<< Add this
#include <filesystem>      // <<< Add for path manipulation (C++17)

struct ShaderUniform
{
    std::string name;
    GLint location = -1;
    // Add type info if needed for different glUniform calls, or handle in setter
};

struct PipelineOperation
{
    std::string name;
    GLuint shaderProgram = 0;
    bool enabled = true;
    std::map<std::string, ShaderUniform> uniforms; // Map uniform name to its info

    // Function to update uniforms based on global slider values etc.
    std::function<void(GLuint /*program*/)> updateUniformsCallback;

    // Store the actual slider variable pointers for direct modification in ImGui
    // This avoids needing complex callbacks for simple sliders
    float *exposureVal = nullptr;
    float *contrastVal = nullptr;
    float *highlightsVal = nullptr;
    float *shadowsVal = nullptr;
    float *whitesVal = nullptr;
    float *blacksVal = nullptr;
    float *temperatureVal = nullptr;
    float *tintVal = nullptr;
    float *vibranceVal = nullptr;
    float *saturationVal = nullptr;
    float *clarityVal = nullptr;
    float *textureVal = nullptr;
    float *dehazeVal = nullptr;
    // ... add pointers for other controls as needed

    PipelineOperation(std::string n) : name(std::move(n)) {}

    void FindUniformLocations()
    {
        if (!shaderProgram)
            return;
        for (auto &pair : uniforms)
        {
            pair.second.location = glGetUniformLocation(shaderProgram, pair.second.name.c_str());
            if (pair.second.location == -1 && name != "Passthrough" && name != "LinearToSRGB" && name != "SRGBToLinear")
            { // Ignore for simple shaders
                // Don't treat missing texture samplers as errors here, they are set explicitly
                if (pair.second.name != "InputTexture")
                {
                    fprintf(stderr, "Warning: Uniform '%s' not found in shader '%s'\n", pair.second.name.c_str(), name.c_str());
                }
            }
        }
    }
};

// Enum for Color Spaces (expand later)
enum class ColorSpace
{
    LINEAR_SRGB, // Linear Rec.709/sRGB primaries
    SRGB         // Non-linear sRGB (display)
    // Add AdobeRGB, ProPhoto etc. later
};

const char *ColorSpaceToString(ColorSpace cs)
{
    switch (cs)
    {
    case ColorSpace::LINEAR_SRGB:
        return "Linear sRGB";
    case ColorSpace::SRGB:
        return "sRGB";
    default:
        return "Unknown";
    }
}

bool ReadTextureToAppImage(GLuint textureId, int width, int height, AppImage &outImage)
{
    if (textureId == 0 || width <= 0 || height <= 0)
    {
        fprintf(stderr, "ReadTextureToAppImage: Invalid parameters.\n");
        return false;
    }

    // We assume the texture 'textureId' holds LINEAR RGBA FLOAT data (e.g., GL_RGBA16F)
    // Resize AppImage to hold the data
    outImage.resize(width, height, 4);         // Expecting 4 channels (RGBA) from pipeline texture
    outImage.m_isLinear = true;                // Data we read back should be linear
    outImage.m_colorSpaceName = "Linear sRGB"; // Assuming pipeline used sRGB primaries

    std::vector<float> &pixelData = outImage.getPixelVector();
    if (pixelData.empty())
    {
        fprintf(stderr, "ReadTextureToAppImage: Failed to allocate AppImage buffer.\n");
        return false;
    }

    // Bind the texture
    GLint lastTexture;
    glGetIntegerv(GL_TEXTURE_BINDING_2D, &lastTexture);
    glBindTexture(GL_TEXTURE_2D, textureId);

    // Set alignment (good practice)
    glPixelStorei(GL_PACK_ALIGNMENT, 1);

    // Read the pixels
    // We request GL_RGBA and GL_FLOAT as that's our assumed linear working format on GPU
    glGetTexImage(GL_TEXTURE_2D,
                  0,                 // Mipmap level 0
                  GL_RGBA,           // Request RGBA format
                  GL_FLOAT,          // Request float data type
                  pixelData.data()); // Pointer to destination buffer

    GLenum err = glGetError();
    glBindTexture(GL_TEXTURE_2D, lastTexture); // Restore previous binding

    if (err != GL_NO_ERROR)
    {
        fprintf(stderr, "ReadTextureToAppImage: OpenGL Error during glGetTexImage: %u\n", err);
        outImage.clear_image(); // Clear invalid data
        return false;
    }

    printf("ReadTextureToAppImage: Successfully read %dx%d texture.\n", width, height);
    return true;
}

class ImageProcessingPipeline
{
private:
    GLuint m_fbo[2] = {0, 0};
    GLuint m_tex[2] = {0, 0}; // Ping-pong textures
    GLuint m_vao = 0;
    GLuint m_vbo = 0;
    int m_texWidth = 0;
    int m_texHeight = 0;
    GLuint m_passthroughShader = 0;
    GLuint m_linearToSrgbShader = 0;
    GLuint m_srgbToLinearShader = 0;

    void CreateFullscreenQuad()
    {
        // Simple quad covering -1 to 1 in x,y and 0 to 1 in u,v
        float vertices[] = {
            // positions // texCoords
            -1.0f, 1.0f, 0.0f, 1.0f,
            -1.0f, -1.0f, 0.0f, 0.0f,
            1.0f, -1.0f, 1.0f, 0.0f,

            -1.0f, 1.0f, 0.0f, 1.0f,
            1.0f, -1.0f, 1.0f, 0.0f,
            1.0f, 1.0f, 1.0f, 1.0f};
        printf("Matrix ready.\n");

        glGenVertexArrays(1, &m_vao);
        printf("Fullscreen quad VAO created.\n");
        glGenBuffers(1, &m_vbo);
        printf("Fullscreen quad VBO created.\n");

        glBindVertexArray(m_vao);
        glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
        glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
        printf("Fullscreen quad VBO created.\n");

        // Position attribute
        glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (void *)0);
        glEnableVertexAttribArray(0);
        // Texture coordinate attribute
        glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (void *)(2 * sizeof(float)));
        glEnableVertexAttribArray(1);

        glBindBuffer(GL_ARRAY_BUFFER, 0);
        glBindVertexArray(0);
        printf("Fullscreen quad VAO/VBO created.\n");
    }

    void CreateOrResizeFBOs(int width, int height)
    {
        if (width == m_texWidth && height == m_texHeight && m_fbo[0] != 0)
        {
            return; // Already correct size
        }

        if (width <= 0 || height <= 0)
            return; // Invalid dimensions

        // Cleanup existing
        DestroyFBOs();

        m_texWidth = width;
        m_texHeight = height;

        glGenFramebuffers(2, m_fbo);
        glGenTextures(2, m_tex);

        GLint lastTexture;
        glGetIntegerv(GL_TEXTURE_BINDING_2D, &lastTexture);
        GLint lastFBO;
        glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &lastFBO); // Or GL_FRAMEBUFFER_BINDING

        for (int i = 0; i < 2; ++i)
        {
            glBindFramebuffer(GL_FRAMEBUFFER, m_fbo[i]);
            glBindTexture(GL_TEXTURE_2D, m_tex[i]);

            // Create floating point texture
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F, width, height, 0, GL_RGBA, GL_FLOAT, nullptr);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); // Use NEAREST for processing steps
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

            // Attach texture to FBO
            glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_tex[i], 0);

            if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
            {
                fprintf(stderr, "ERROR::FRAMEBUFFER:: Framebuffer %d is not complete!\n", i);
                DestroyFBOs(); // Clean up partial setup
                glBindTexture(GL_TEXTURE_2D, lastTexture);
                glBindFramebuffer(GL_FRAMEBUFFER, lastFBO);
                return;
            }
            else
            {
                printf("FBO %d (Texture %d) created successfully (%dx%d).\n", m_fbo[i], m_tex[i], width, height);
            }
        }
        glBindTexture(GL_TEXTURE_2D, lastTexture);
        glBindFramebuffer(GL_FRAMEBUFFER, lastFBO);
    }

    void DestroyFBOs()
    {
        if (m_fbo[0])
            glDeleteFramebuffers(2, m_fbo);
        if (m_tex[0])
            glDeleteTextures(2, m_tex);
        m_fbo[0] = m_fbo[1] = 0;
        m_tex[0] = m_tex[1] = 0;
        m_texWidth = m_texHeight = 0;
        printf("Destroyed FBOs and textures.\n");
    }

public:
    // The ordered list of operations the user has configured
    std::vector<PipelineOperation> activeOperations;
    ColorSpace inputColorSpace = ColorSpace::LINEAR_SRGB; // Default based on AppImage goal
    ColorSpace outputColorSpace = ColorSpace::SRGB;       // Default for display

    ImageProcessingPipeline() = default;

    ~ImageProcessingPipeline()
    {
        DestroyFBOs();
        if (m_vao)
            glDeleteVertexArrays(1, &m_vao);
        if (m_vbo)
            glDeleteBuffers(1, &m_vbo);
        // Shaders owned by PipelineOperation structs should be deleted externally or via smart pointers
        if (m_passthroughShader)
            glDeleteProgram(m_passthroughShader);
        if (m_linearToSrgbShader)
            glDeleteProgram(m_linearToSrgbShader);
        if (m_srgbToLinearShader)
            glDeleteProgram(m_srgbToLinearShader);
        printf("ImageProcessingPipeline destroyed.\n");
    }

    void Init(const std::string &shaderBasePath)
    {
        printf("Initializing ImageProcessingPipeline...\n");
        CreateFullscreenQuad();
        printf("Fullscreen quad created.\n");
        // Load essential shaders
        std::string vsPath = shaderBasePath + "passthrough.vert";
        printf("Loading shaders from: %s\n", vsPath.c_str());
        m_passthroughShader = LoadShaderProgramFromFiles(vsPath, shaderBasePath + "passthrough.frag");
        m_linearToSrgbShader = LoadShaderProgramFromFiles(vsPath, shaderBasePath + "linear_to_srgb.frag");
        m_srgbToLinearShader = LoadShaderProgramFromFiles(vsPath, shaderBasePath + "srgb_to_linear.frag");
        printf("Loaded shaders: %s, %s, %s\n", vsPath.c_str(), (shaderBasePath + "linear_to_srgb.frag").c_str(), (shaderBasePath + "srgb_to_linear.frag").c_str());

        if (!m_passthroughShader || !m_linearToSrgbShader || !m_srgbToLinearShader)
        {
            fprintf(stderr, "Failed to load essential pipeline shaders!\n");
        }
        else
        {
            printf("Essential pipeline shaders loaded.\n");
        }
    }

    void ResetResources()
    {
        printf("Pipeline: Resetting FBOs and Textures.\n");
        DestroyFBOs(); // Call the existing cleanup method
    }

    // Call this each frame to process the image
    // Returns the Texture ID of the final processed image
    GLuint ProcessImage(GLuint inputTextureId, int width, int height, bool applyOutputConversion = true)
    {
        if (inputTextureId == 0 || width <= 0 || height <= 0)
        {
            return 0; // No input or invalid size
        }

        CreateOrResizeFBOs(width, height);
        if (m_fbo[0] == 0)
        {
            fprintf(stderr, "FBOs not ready, cannot process image.\n");
            return 0; // FBOs not ready
        }

        // Store original viewport and FBO to restore later
        GLint viewport[4];
        glGetIntegerv(GL_VIEWPORT, viewport);
        GLint lastFBO;
        glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &lastFBO);

        glViewport(0, 0, m_texWidth, m_texHeight);
        glBindVertexArray(m_vao); // Bind the quad VAO once

        int currentSourceTexIndex = 0;            // Start with texture m_tex[0] as the first *write* target
        GLuint currentReadTexId = inputTextureId; // Initially read from the original image

        // --- Input Color Space Conversion ---
        bool inputConversionDone = false;
        if (inputColorSpace == ColorSpace::SRGB)
        {
            printf("Pipeline: Applying sRGB -> Linear conversion.\n");
            glBindFramebuffer(GL_FRAMEBUFFER, m_fbo[currentSourceTexIndex]);
            glUseProgram(m_srgbToLinearShader);
            glActiveTexture(GL_TEXTURE0);
            glBindTexture(GL_TEXTURE_1D, currentReadTexId);
            glUniform1i(glGetUniformLocation(m_srgbToLinearShader, "InputTexture"), 0);
            glDrawArrays(GL_TRIANGLES, 0, 6);

            currentReadTexId = m_tex[currentSourceTexIndex];   // Next read is from the texture we just wrote to
            currentSourceTexIndex = 1 - currentSourceTexIndex; // Swap target FBO/texture
            inputConversionDone = true;
        }
        else
        {
            printf("Pipeline: Input is Linear, no conversion needed.\n");
            // If input is already linear, we might need to copy it to the first FBO texture
            // if there are actual processing steps, otherwise the first step reads the original.
            // This copy ensures the ping-pong works correctly even if the first *user* step is disabled.
            // However, if NO user steps are enabled, we want to display the original (potentially with output conversion).
            bool anyUserOpsEnabled = false;
            for (const auto &op : activeOperations)
            {
                if (op.enabled && op.shaderProgram && op.name != "Passthrough")
                { // Check it's a real operation
                    anyUserOpsEnabled = true;
                    break;
                }
            }

            if (anyUserOpsEnabled)
            {
                // Need to copy original linear input into the pipeline's texture space
                printf("Pipeline: Copying linear input to FBO texture for processing.\n");
                glBindFramebuffer(GL_FRAMEBUFFER, m_fbo[currentSourceTexIndex]);
                glUseProgram(m_passthroughShader); // Use simple passthrough
                glActiveTexture(GL_TEXTURE0);
                glBindTexture(GL_TEXTURE_2D, currentReadTexId);
                glUniform1i(glGetUniformLocation(m_passthroughShader, "InputTexture"), 0);
                glDrawArrays(GL_TRIANGLES, 0, 6);
                currentReadTexId = m_tex[currentSourceTexIndex];
                currentSourceTexIndex = 1 - currentSourceTexIndex;
                inputConversionDone = true;
            }
            else
            {
                // No user ops, keep reading directly from original inputTextureId
                inputConversionDone = false; // Treat as if no initial step happened yet
                printf("Pipeline: No enabled user operations, skipping initial copy.\n");
            }
        }

        // --- Apply Editing Operations ---
        int appliedOps = 0;
        for (const auto &op : activeOperations)
        {
            if (op.enabled && op.shaderProgram)
            {
                printf("Pipeline: Applying operation: %s\n", op.name.c_str());
                glBindFramebuffer(GL_FRAMEBUFFER, m_fbo[currentSourceTexIndex]);
                glUseProgram(op.shaderProgram);

                // Set Input Texture Sampler
                glActiveTexture(GL_TEXTURE0);
                glBindTexture(GL_TEXTURE_2D, currentReadTexId);
                GLint loc = glGetUniformLocation(op.shaderProgram, "InputTexture");
                if (loc != -1)
                    glUniform1i(loc, 0);
                else if (op.name != "Passthrough")
                    fprintf(stderr, "Warning: InputTexture uniform not found in shader %s\n", op.name.c_str());

                // Set operation-specific uniforms
                if (op.updateUniformsCallback)
                {
                    op.updateUniformsCallback(op.shaderProgram);
                }
                else
                {
                    // Alternative: Set uniforms directly based on stored pointers
                    if (op.exposureVal && op.uniforms.count("exposureValue"))
                    {
                        glUniform1f(op.uniforms.at("exposureValue").location, *op.exposureVal);
                    }
                    if (op.contrastVal && op.uniforms.count("contrastValue"))
                    {
                        glUniform1f(op.uniforms.at("contrastValue").location, *op.contrastVal);
                    }
                    if (op.clarityVal && op.uniforms.count("clarityValue"))
                    {
                        glUniform1f(op.uniforms.at("clarityValue").location, *op.clarityVal);
                    }
                    if (op.highlightsVal && op.uniforms.count("highlightsValue"))
                    {
                        glUniform1f(op.uniforms.at("highlightsValue").location, *op.highlightsVal);
                    }
                    if (op.shadowsVal && op.uniforms.count("shadowsValue"))
                    {
                        glUniform1f(op.uniforms.at("shadowsValue").location, *op.shadowsVal);
                    }
                    if (op.whitesVal && op.uniforms.count("whitesValue"))
                    {
                        glUniform1f(op.uniforms.at("whitesValue").location, *op.whitesVal);
                    }
                    if (op.blacksVal && op.uniforms.count("blacksValue"))
                    {
                        glUniform1f(op.uniforms.at("blacksValue").location, *op.blacksVal);
                    }
                    if (op.textureVal && op.uniforms.count("textureValue"))
                    {
                        glUniform1f(op.uniforms.at("textureValue").location, *op.textureVal);
                    }
                    if (op.dehazeVal && op.uniforms.count("dehazeValue"))
                    {
                        glUniform1f(op.uniforms.at("dehazeValue").location, *op.dehazeVal);
                    }
                    if (op.saturationVal && op.uniforms.count("saturationValue"))
                    {
                        glUniform1f(op.uniforms.at("saturationValue").location, *op.saturationVal);
                    }
                    if (op.vibranceVal && op.uniforms.count("vibranceValue"))
                    {
                        glUniform1f(op.uniforms.at("vibranceValue").location, *op.vibranceVal);
                    }
                    if (op.temperatureVal && op.uniforms.count("temperatureValue"))
                    {
                        glUniform1f(op.uniforms.at("temperatureValue").location, *op.temperatureVal);
                    }
                    if (op.tintVal && op.uniforms.count("tintValue"))
                    {
                        glUniform1f(op.uniforms.at("tintValue").location, *op.tintVal);
                    }
                }

                glDrawArrays(GL_TRIANGLES, 0, 6);

                // Prepare for next pass
                currentReadTexId = m_tex[currentSourceTexIndex];   // Next pass reads from the texture we just wrote
                currentSourceTexIndex = 1 - currentSourceTexIndex; // Swap FBO target
                appliedOps++;
            }
        }

        // If no user ops were applied AND no input conversion happened,
        // currentReadTexId is still the original inputTextureId.
        if (appliedOps == 0 && !inputConversionDone)
        {
            printf("Pipeline: No operations applied, output = input (%d).\n", currentReadTexId);
            // Proceed to output conversion using original inputTextureId
        }
        else if (appliedOps > 0 || inputConversionDone)
        {
            printf("Pipeline: %d operations applied, final intermediate texture ID: %d\n", appliedOps, currentReadTexId);
            // currentReadTexId now holds the result of the last applied operation (or the input conversion)
        }
        else
        {
            // This case should ideally not be reached if logic above is correct
            printf("Pipeline: Inconsistent state after processing loop.\n");
        }

        // --- Output Color Space Conversion ---
        GLuint finalTextureId = currentReadTexId; // Assume this is the final one unless converted
        if (applyOutputConversion)
        {
            if (outputColorSpace == ColorSpace::SRGB)
            {
                // Check if the last written data (currentReadTexId) is already sRGB.
                // In this simple setup, it's always linear *unless* no ops applied and input was sRGB.
                // More robustly: Track the color space through the pipeline.
                // For now, assume currentReadTexId holds linear data if any op or input conversion happened.
                bool needsLinearToSrgb = (appliedOps > 0 || inputConversionDone);

                if (!needsLinearToSrgb && inputColorSpace == ColorSpace::SRGB)
                {
                    printf("Pipeline: Output is sRGB, and input was sRGB with no ops, no final conversion needed.\n");
                    // Input was sRGB, no ops applied, output should be sRGB. currentReadTexId is original sRGB input.
                    finalTextureId = currentReadTexId;
                }
                else if (needsLinearToSrgb)
                {
                    printf("Pipeline: Applying Linear -> sRGB conversion for output.\n");
                    glBindFramebuffer(GL_FRAMEBUFFER, m_fbo[currentSourceTexIndex]); // Use the *next* FBO for the final write
                    glUseProgram(m_linearToSrgbShader);
                    glActiveTexture(GL_TEXTURE0);
                    glBindTexture(GL_TEXTURE_2D, currentReadTexId); // Read the last result
                    glUniform1i(glGetUniformLocation(m_linearToSrgbShader, "InputTexture"), 0);
                    glDrawArrays(GL_TRIANGLES, 0, 6);
                    finalTextureId = m_tex[currentSourceTexIndex]; // The final result is in this texture
                }
                else
                {
                    // Input was linear, no ops, output requires sRGB.
                    printf("Pipeline: Input Linear, no ops, applying Linear -> sRGB conversion for output.\n");
                    glBindFramebuffer(GL_FRAMEBUFFER, m_fbo[currentSourceTexIndex]);
                    glUseProgram(m_linearToSrgbShader);
                    glActiveTexture(GL_TEXTURE0);
                    glBindTexture(GL_TEXTURE_2D, currentReadTexId); // Read original linear input
                    glUniform1i(glGetUniformLocation(m_linearToSrgbShader, "InputTexture"), 0);
                    glDrawArrays(GL_TRIANGLES, 0, 6);
                    finalTextureId = m_tex[currentSourceTexIndex];
                }
            }
            else
            {
                printf("Pipeline: Output is Linear, no final conversion needed.\n");
                // If output should be linear, finalTextureId is already correct (it's currentReadTexId)
                finalTextureId = currentReadTexId;
            }
        }
        else
        {
            printf("Pipeline: Skipped output conversion. Final (linear) ID: %d\n", finalTextureId);
        }

        // --- Cleanup ---
        glBindVertexArray(0);
        glBindFramebuffer(GL_FRAMEBUFFER, lastFBO);                     // Restore original framebuffer binding
        glViewport(viewport[0], viewport[1], viewport[2], viewport[3]); // Restore viewport
        glUseProgram(0);                                                // Unbind shader program

        printf("Pipeline: ProcessImage returning final texture ID: %d\n", finalTextureId);
        return finalTextureId;
    }
};

static ImageProcessingPipeline g_pipeline;                              // <<< Global pipeline manager instance
static std::vector<std::unique_ptr<PipelineOperation>> g_allOperations; // Store all possible operations
static GLuint g_processedTextureId = 0;                                 // Texture ID after pipeline processing
static ColorSpace g_inputColorSpace = ColorSpace::LINEAR_SRGB;          // Connect to pipeline's setting
static ColorSpace g_outputColorSpace = ColorSpace::SRGB;                // Connect to pipeline's setting

// File Dialogs
static ImGui::FileBrowser g_openFileDialog;
// Add flags for save dialog: Allow new filename, allow creating directories
static ImGui::FileBrowser g_exportSaveFileDialog(ImGuiFileBrowserFlags_EnterNewFilename | ImGuiFileBrowserFlags_CreateNewDir);

// Export Dialog State
static bool g_showExportWindow = false;
static ImageSaveFormat g_exportFormat = ImageSaveFormat::JPEG; // Default format
static int g_exportQuality = 90;                               // Default JPEG quality
static std::string g_exportErrorMsg = "";                      // To display errors in the export dialog

// Current loaded file path (useful for default export name)
static std::string g_currentFilePath = "";

// Crop State
static bool g_cropActive = false;
static ImVec4 g_cropRectNorm = ImVec4(0.0f, 0.0f, 1.0f, 1.0f); // (MinX, MinY, MaxX, MaxY) normalized 0-1
static ImVec4 g_cropRectNormInitial = g_cropRectNorm;          // Store initial state for cancel/dragging base
static float g_cropAspectRatio = 0.0f;                         // 0.0f = Freeform, > 0.0f = constrained (Width / Height)
static int g_selectedAspectRatioIndex = 0;                     // Index for the dropdown

static GLuint g_histogramComputeShader = 0;
static GLuint g_histogramSSBO = 0;
const int NUM_HISTOGRAM_BINS = 256;
const int HISTOGRAM_BUFFER_SIZE = NUM_HISTOGRAM_BINS * 3; // R, G, B
static std::vector<unsigned int> g_histogramDataCPU(HISTOGRAM_BUFFER_SIZE, 0);
static unsigned int g_histogramMaxCount = 255; // Max count found, for scaling (init to 1 to avoid div by zero)
static bool g_histogramResourcesInitialized = false;

// Interaction state
enum class CropHandle
{
    NONE,
    TOP_LEFT,
    TOP_RIGHT,
    BOTTOM_LEFT,
    BOTTOM_RIGHT,
    TOP,
    BOTTOM,
    LEFT,
    RIGHT,
    INSIDE
};
static CropHandle g_activeCropHandle = CropHandle::NONE;
static bool g_isDraggingCrop = false;
static ImVec2 g_dragStartMousePos = ImVec2(0, 0); // Screen coords


bool InitHistogramResources(const std::string& shaderBasePath) {
    printf("Initializing Histogram Resources...\n");
    // Load Compute Shader
    // We need a way to load compute shaders, modify shader_utils or add here
    std::string compSource = ReadFile(shaderBasePath + "histogram.comp"); // Assuming ReadFile exists
    if (compSource.empty()) {
        fprintf(stderr, "ERROR: Failed to read histogram.comp\n");
        return false;
    }
    // Simple Compute Shader Compilation/Linking (add error checking!)
    GLuint computeShaderObj = glCreateShader(GL_COMPUTE_SHADER);
    const char* src = compSource.c_str();
    glShaderSource(computeShaderObj, 1, &src, nullptr);
    glCompileShader(computeShaderObj);
    // --- Add GLint success; glGetShaderiv; glGetShaderInfoLog checks ---
    GLint success;
    glGetShaderiv(computeShaderObj, GL_COMPILE_STATUS, &success);
     if (!success) {
        GLint logLength;
        glGetShaderiv(computeShaderObj, GL_INFO_LOG_LENGTH, &logLength);
        std::vector<char> log(logLength);
        glGetShaderInfoLog(computeShaderObj, logLength, nullptr, log.data());
        fprintf(stderr, "ERROR::SHADER::HISTOGRAM::COMPILATION_FAILED\n%s\n", log.data());
        glDeleteShader(computeShaderObj);
        return false;
    }


    g_histogramComputeShader = glCreateProgram();
    glAttachShader(g_histogramComputeShader, computeShaderObj);
    glLinkProgram(g_histogramComputeShader);
    // --- Add GLint success; glGetProgramiv; glGetProgramInfoLog checks ---
     glGetProgramiv(g_histogramComputeShader, GL_LINK_STATUS, &success);
     if (!success) {
        GLint logLength;
        glGetProgramiv(g_histogramComputeShader, GL_INFO_LOG_LENGTH, &logLength);
        std::vector<char> log(logLength);
        glGetProgramInfoLog(g_histogramComputeShader, logLength, nullptr, log.data());
        fprintf(stderr, "ERROR::PROGRAM::HISTOGRAM::LINKING_FAILED\n%s\n", log.data());
        glDeleteProgram(g_histogramComputeShader);
        g_histogramComputeShader = 0;
        glDeleteShader(computeShaderObj); // Delete shader obj even on link failure
        return false;
    }


    glDeleteShader(computeShaderObj); // Delete shader object after linking
    printf("Histogram compute shader loaded and linked successfully (Program ID: %u).\n", g_histogramComputeShader);


    // Create Shader Storage Buffer Object (SSBO)
    glGenBuffers(1, &g_histogramSSBO);
    glBindBuffer(GL_SHADER_STORAGE_BUFFER, g_histogramSSBO);
    // Allocate buffer size: 3 channels * 256 bins * size of uint
    glBufferData(GL_SHADER_STORAGE_BUFFER, HISTOGRAM_BUFFER_SIZE * sizeof(unsigned int), NULL, GL_DYNAMIC_READ); // Data will be written by GPU, read by CPU
    glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0); // Unbind

    GLenum err = glGetError();
    if (err != GL_NO_ERROR || g_histogramSSBO == 0) {
        fprintf(stderr, "ERROR: Failed to create histogram SSBO. OpenGL Error: %u\n", err);
        if (g_histogramComputeShader) glDeleteProgram(g_histogramComputeShader);
        g_histogramComputeShader = 0;
        return false;
    } else {
         printf("Histogram SSBO created successfully (Buffer ID: %u, Size: %d bytes).\n", g_histogramSSBO, HISTOGRAM_BUFFER_SIZE * sizeof(unsigned int));
    }


    g_histogramResourcesInitialized = true;
    return true;
}

// Aspect Ratio Options
struct AspectRatioOption
{
    const char *name;
    float ratio; // W/H
};
static std::vector<AspectRatioOption> g_aspectRatios = {
    {"Freeform", 0.0f},
    {"Original", 0.0f}, // Will be calculated dynamically
    {"1:1", 1.0f},
    {"16:9", 16.0f / 9.0f},
    {"9:16", 9.0f / 16.0f},
    {"4:3", 4.0f / 3.0f},
    {"3:4", 3.0f / 4.0f},
    // Add more as needed
};

void UpdateCropRect(ImVec4& rectNorm, CropHandle handle, ImVec2 deltaNorm, float aspectRatio) {
    ImVec2 minXY = ImVec2(rectNorm.x, rectNorm.y);
    ImVec2 maxXY = ImVec2(rectNorm.z, rectNorm.w);

    // Apply delta based on handle
    switch (handle) {
        case CropHandle::TOP_LEFT:      minXY += deltaNorm; break;
        case CropHandle::TOP_RIGHT:     minXY.y += deltaNorm.y; maxXY.x += deltaNorm.x; break;
        case CropHandle::BOTTOM_LEFT:   minXY.x += deltaNorm.x; maxXY.y += deltaNorm.y; break;
        case CropHandle::BOTTOM_RIGHT:  maxXY += deltaNorm; break;
        case CropHandle::TOP:           minXY.y += deltaNorm.y; break;
        case CropHandle::BOTTOM:        maxXY.y += deltaNorm.y; break;
        case CropHandle::LEFT:          minXY.x += deltaNorm.x; break;
        case CropHandle::RIGHT:         maxXY.x += deltaNorm.x; break;
        case CropHandle::INSIDE:        minXY += deltaNorm; maxXY += deltaNorm; break;
        case CropHandle::NONE:          return; // No change
    }

    // Ensure min < max temporarily before aspect constraint
    if (minXY.x > maxXY.x) ImSwap(minXY.x, maxXY.x);
    if (minXY.y > maxXY.y) ImSwap(minXY.y, maxXY.y);

    // Apply Aspect Ratio Constraint (if aspectRatio > 0)
    if (aspectRatio > 0.0f && handle != CropHandle::INSIDE && handle != CropHandle::NONE)
    {
        float currentW = maxXY.x - minXY.x;
        float currentH = maxXY.y - minXY.y;

        if (currentW < 1e-5f) currentW = 1e-5f; // Avoid division by zero
        if (currentH < 1e-5f) currentH = 1e-5f;

        float currentAspect = currentW / currentH;
        float targetAspect = aspectRatio;

        // Determine which dimension to adjust based on which handle was moved and aspect delta
        // Simplified approach: Adjust height based on width, unless moving top/bottom handles primarily
        bool adjustHeight = true;
        if (handle == CropHandle::TOP || handle == CropHandle::BOTTOM) {
            adjustHeight = false; // Primarily adjust width based on height change
        }

        if (adjustHeight) { // Adjust height based on width
            float targetH = currentW / targetAspect;
            float deltaH = targetH - currentH;
            // Distribute height change based on handle
            if (handle == CropHandle::TOP_LEFT || handle == CropHandle::TOP_RIGHT || handle == CropHandle::TOP) {
                minXY.y -= deltaH; // Adjust top edge
            } else {
                maxXY.y += deltaH; // Adjust bottom edge (or split for side handles?)
                // For LEFT/RIGHT handles, could split deltaH: minXY.y -= deltaH*0.5; maxXY.y += deltaH*0.5;
            }
        } else { // Adjust width based on height
             float targetW = currentH * targetAspect;
             float deltaW = targetW - currentW;
             // Distribute width change based on handle
             if (handle == CropHandle::TOP_LEFT || handle == CropHandle::BOTTOM_LEFT || handle == CropHandle::LEFT) {
                 minXY.x -= deltaW; // Adjust left edge
             } else {
                 maxXY.x += deltaW; // Adjust right edge
                 // For TOP/BOTTOM handles, could split deltaW: minXY.x -= deltaW*0.5; maxXY.x += deltaW*0.5;
             }
        }
    } // End aspect ratio constraint


    // Update the output rectNorm
    rectNorm = ImVec4(minXY.x, minXY.y, maxXY.x, maxXY.y);
}

// Helper function to crop AppImage data
bool ApplyCropToImage(AppImage& image, const ImVec4 cropRectNorm) {
    if (image.isEmpty()) {
        fprintf(stderr, "ApplyCropToImage: Input image is empty.\n");
        return false;
    }
    if (cropRectNorm.x >= cropRectNorm.z || cropRectNorm.y >= cropRectNorm.w) {
         fprintf(stderr, "ApplyCropToImage: Invalid crop rectangle (zero or negative size).\n");
        return false; // Invalid crop rect
    }

    // Clamp rect just in case
    ImVec4 clampedRect = cropRectNorm;
    clampedRect.x = ImClamp(clampedRect.x, 0.0f, 1.0f);
    clampedRect.y = ImClamp(clampedRect.y, 0.0f, 1.0f);
    clampedRect.z = ImClamp(clampedRect.z, 0.0f, 1.0f);
    clampedRect.w = ImClamp(clampedRect.w, 0.0f, 1.0f);

    // Calculate pixel coordinates
    int srcW = image.getWidth();
    int srcH = image.getHeight();
    int channels = image.getChannels();

    int cropX_px = static_cast<int>(round(clampedRect.x * srcW));
    int cropY_px = static_cast<int>(round(clampedRect.y * srcH));
    int cropMaxX_px = static_cast<int>(round(clampedRect.z * srcW));
    int cropMaxY_px = static_cast<int>(round(clampedRect.w * srcH));

    int cropW_px = cropMaxX_px - cropX_px;
    int cropH_px = cropMaxY_px - cropY_px;

    if (cropW_px <= 0 || cropH_px <= 0) {
         fprintf(stderr, "ApplyCropToImage: Resulting crop size is zero or negative (%dx%d).\n", cropW_px, cropH_px);
        return false;
    }

     printf("Applying crop: Start=(%d,%d), Size=(%dx%d)\n", cropX_px, cropY_px, cropW_px, cropH_px);

    // Create new image for cropped data
    AppImage croppedImage(cropW_px, cropH_px, channels);
    if (croppedImage.isEmpty()) {
         fprintf(stderr, "ApplyCropToImage: Failed to allocate memory for cropped image.\n");
        return false;
    }
    croppedImage.m_isLinear = image.isLinear(); // Preserve flags
    croppedImage.m_colorSpaceName = image.getColorSpaceName();
    // TODO: Copy metadata/ICC profile if needed? Cropping usually invalidates some metadata.

    const float* srcData = image.getData();
    float* dstData = croppedImage.getData();

    // Copy pixel data row by row, channel by channel
    for (int y_dst = 0; y_dst < cropH_px; ++y_dst) {
        int y_src = cropY_px + y_dst;
        // Ensure source Y is valid (should be due to clamping/checks, but be safe)
        if (y_src < 0 || y_src >= srcH) continue;

        // Calculate start pointers for source and destination rows
        const float* srcRowStart = srcData + (static_cast<size_t>(y_src) * srcW + cropX_px) * channels;
        float* dstRowStart = dstData + (static_cast<size_t>(y_dst) * cropW_px) * channels;

        // Copy the entire row (width * channels floats)
        std::memcpy(dstRowStart, srcRowStart, static_cast<size_t>(cropW_px) * channels * sizeof(float));
    }

    // Replace the original image data with the cropped data
    // Use std::move if AppImage supports move assignment for efficiency
    image = std::move(croppedImage);

    printf("Cropped image created successfully (%dx%d).\n", image.getWidth(), image.getHeight());
    return true;
}

void InitShaderOperations(const std::string &shaderBasePath)
{
    // Clear existing (if any)
    g_allOperations.clear();
    g_pipeline.activeOperations.clear(); // Also clear the active list in the pipeline

    // --- Define Operations ---
    // Use unique_ptr for automatic memory management
    // Match uniform names to the GLSL shaders

    auto whiteBalanceOp = std::make_unique<PipelineOperation>("White Balance");
    whiteBalanceOp->shaderProgram = LoadShaderProgramFromFiles(shaderBasePath + "passthrough.vert", shaderBasePath + "white_balance.frag");
    if (whiteBalanceOp->shaderProgram)
    {
        whiteBalanceOp->uniforms["temperatureValue"] = {"temperature"};
        whiteBalanceOp->uniforms["tintValue"] = {"tint"};
        whiteBalanceOp->temperatureVal = &temperature;
        whiteBalanceOp->tintVal = &tint;
        whiteBalanceOp->FindUniformLocations();
        g_allOperations.push_back(std::move(whiteBalanceOp));
        printf("  + Loaded White Balance\n");
    }
    else
        printf("  - FAILED White Balance\n");

    auto exposureOp = std::make_unique<PipelineOperation>("Exposure");
    exposureOp->shaderProgram = LoadShaderProgramFromFiles(shaderBasePath + "passthrough.vert", shaderBasePath + "exposure.frag");
    exposureOp->uniforms["exposureValue"] = {"exposureValue"};
    exposureOp->exposureVal = &exposure; // Link to global slider variable
    exposureOp->FindUniformLocations();
    g_allOperations.push_back(std::move(exposureOp));

    auto contrastOp = std::make_unique<PipelineOperation>("Contrast");
    contrastOp->shaderProgram = LoadShaderProgramFromFiles(shaderBasePath + "passthrough.vert", shaderBasePath + "contrast.frag");
    if (contrastOp->shaderProgram)
    {
        contrastOp->uniforms["contrastValue"] = {"contrastValue"};
        contrastOp->contrastVal = &contrast;
        contrastOp->FindUniformLocations();
        g_allOperations.push_back(std::move(contrastOp));
        printf("  + Loaded Contrast\n");
    }
    else
        printf("  - FAILED Contrast\n");

    auto highlightsShadowsOp = std::make_unique<PipelineOperation>("Highlights/Shadows");
    highlightsShadowsOp->shaderProgram = LoadShaderProgramFromFiles(shaderBasePath + "passthrough.vert", shaderBasePath + "highlights_shadows.frag");
    if (highlightsShadowsOp->shaderProgram)
    {
        highlightsShadowsOp->uniforms["highlightsValue"] = {"highlightsValue"};
        highlightsShadowsOp->uniforms["shadowsValue"] = {"shadowsValue"};
        highlightsShadowsOp->highlightsVal = &highlights;
        highlightsShadowsOp->shadowsVal = &shadows;
        highlightsShadowsOp->FindUniformLocations();
        g_allOperations.push_back(std::move(highlightsShadowsOp));
        printf("  + Loaded Highlights/Shadows\n");
    }
    else
        printf("  - FAILED Highlights/Shadows\n");

    auto whiteBlackOp = std::make_unique<PipelineOperation>("Whites/Blacks");

    whiteBlackOp->shaderProgram = LoadShaderProgramFromFiles(shaderBasePath + "passthrough.vert", shaderBasePath + "whites_blacks.frag");
    if (whiteBlackOp->shaderProgram)
    {
        whiteBlackOp->uniforms["whitesValue"] = {"whitesValue"};
        whiteBlackOp->uniforms["blacksValue"] = {"blacksValue"};
        whiteBlackOp->whitesVal = &whites;
        whiteBlackOp->blacksVal = &blacks;
        whiteBlackOp->FindUniformLocations();
        g_allOperations.push_back(std::move(whiteBlackOp));
        printf("  + Loaded Whites/Blacks\n");
    }
    else
        printf("  - FAILED Whites/Blacks\n");

    auto textureOp = std::make_unique<PipelineOperation>("Texture");

    textureOp->shaderProgram = LoadShaderProgramFromFiles(shaderBasePath + "passthrough.vert", shaderBasePath + "texture.frag");
    if (textureOp->shaderProgram)
    {
        textureOp->uniforms["textureValue"] = {"textureValue"};
        textureOp->textureVal = &texture;
        textureOp->FindUniformLocations();
        g_allOperations.push_back(std::move(textureOp));
        printf("  + Loaded Texture\n");
    }
    else
        printf("  - FAILED Texture\n");

    auto clarityOp = std::make_unique<PipelineOperation>("Clarity");
    clarityOp->shaderProgram = LoadShaderProgramFromFiles(shaderBasePath + "passthrough.vert", shaderBasePath + "clarity.frag");
    if (clarityOp->shaderProgram)
    {
        clarityOp->uniforms["clarityValue"] = {"clarityValue"};
        clarityOp->clarityVal = &clarity;
        clarityOp->FindUniformLocations();
        g_allOperations.push_back(std::move(clarityOp));
        printf("  + Loaded Clarity\n");
    }
    else
        printf("  - FAILED Clarity\n");

    auto dehazeOp = std::make_unique<PipelineOperation>("Dehaze");
    dehazeOp->shaderProgram = LoadShaderProgramFromFiles(shaderBasePath + "passthrough.vert", shaderBasePath + "dehaze.frag");
    if (dehazeOp->shaderProgram)
    {
        dehazeOp->uniforms["dehazeValue"] = {"dehazeValue"};
        dehazeOp->dehazeVal = &dehaze;
        dehazeOp->FindUniformLocations();
        g_allOperations.push_back(std::move(dehazeOp));
        printf("  + Loaded Dehaze\n");
    }
    else
        printf("  - FAILED Dehaze\n");

    auto saturationOp = std::make_unique<PipelineOperation>("Saturation");
    saturationOp->shaderProgram = LoadShaderProgramFromFiles(shaderBasePath + "passthrough.vert", shaderBasePath + "saturation.frag");
    if (saturationOp->shaderProgram)
    {
        saturationOp->uniforms["saturationValue"] = {"saturationValue"};
        saturationOp->saturationVal = &saturation;
        saturationOp->FindUniformLocations();
        g_allOperations.push_back(std::move(saturationOp));
        printf("  + Loaded Saturation\n");
    }
    else
        printf("  - FAILED Saturation\n");

    auto vibranceOp = std::make_unique<PipelineOperation>("Vibrance");
    vibranceOp->shaderProgram = LoadShaderProgramFromFiles(shaderBasePath + "passthrough.vert", shaderBasePath + "vibrance.frag");
    if (vibranceOp->shaderProgram)
    {
        vibranceOp->uniforms["vibranceValue"] = {"vibranceValue"};
        vibranceOp->vibranceVal = &vibrance;
        vibranceOp->FindUniformLocations();
        g_allOperations.push_back(std::move(vibranceOp));
        printf("  + Loaded Vibrance\n");
    }
    else
        printf("  - FAILED Vibrance\n");

    g_pipeline.activeOperations.clear();
    for (const auto &op_ptr : g_allOperations)
    {
        if (op_ptr)
        {                                                   // Make sure pointer is valid
            g_pipeline.activeOperations.push_back(*op_ptr); // Add a *copy* to the active list
            // Re-find locations for the copy (or ensure copy constructor handles it)
            g_pipeline.activeOperations.back().FindUniformLocations();
            // Copy the pointers to the actual slider variables
            g_pipeline.activeOperations.back().exposureVal = op_ptr->exposureVal;
            g_pipeline.activeOperations.back().contrastVal = op_ptr->contrastVal;
            g_pipeline.activeOperations.back().clarityVal = op_ptr->clarityVal;
            g_pipeline.activeOperations.back().highlightsVal = op_ptr->highlightsVal;
            g_pipeline.activeOperations.back().shadowsVal = op_ptr->shadowsVal;
            g_pipeline.activeOperations.back().whitesVal = op_ptr->whitesVal;
            g_pipeline.activeOperations.back().blacksVal = op_ptr->blacksVal;
            g_pipeline.activeOperations.back().textureVal = op_ptr->textureVal;
            g_pipeline.activeOperations.back().dehazeVal = op_ptr->dehazeVal;
            g_pipeline.activeOperations.back().saturationVal = op_ptr->saturationVal;
            g_pipeline.activeOperations.back().vibranceVal = op_ptr->vibranceVal;
            g_pipeline.activeOperations.back().temperatureVal = op_ptr->temperatureVal;
            g_pipeline.activeOperations.back().tintVal = op_ptr->tintVal;

            // Set initial enabled state if needed (e.g., all enabled by default)
            g_pipeline.activeOperations.back().enabled = true;
        }
    }
    printf("Initialized %zu possible operations. %zu added to default active pipeline.\n",
           g_allOperations.size(), g_pipeline.activeOperations.size());
}


// Add this function somewhere accessible, e.g., before main()

void ComputeHistogramGPU(GLuint inputTextureID, int width, int height) {
    if (!g_histogramResourcesInitialized || inputTextureID == 0 || width <= 0 || height <= 0) {
        // Clear CPU data if not computed
        std::fill(g_histogramDataCPU.begin(), g_histogramDataCPU.end(), 0);
        g_histogramMaxCount = 1;
        printf("Histogram resources not initialized or invalid input. Skipping computation.\n");
        return;
    }

    // 1. Clear the SSBO buffer data to zeros
    glBindBuffer(GL_SHADER_STORAGE_BUFFER, g_histogramSSBO);
    // Using glBufferSubData might be marginally faster than glClearBufferData if driver optimizes zeroing
    // static std::vector<unsigned int> zeros(HISTOGRAM_BUFFER_SIZE, 0); // Create once
    // glBufferSubData(GL_SHADER_STORAGE_BUFFER, 0, HISTOGRAM_BUFFER_SIZE * sizeof(unsigned int), zeros.data());
    // Or use glClearBufferData (often recommended)
    GLuint zero = 0;
    glClearBufferData(GL_SHADER_STORAGE_BUFFER, GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, &zero);
    glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0); // Unbind


    // 2. Bind resources and dispatch compute shader
    glUseProgram(g_histogramComputeShader);

    // Bind input texture as image unit 0 (read-only)
    // IMPORTANT: Ensure the format matches the compute shader layout qualifier (e.g., rgba8)
    // If textureToDisplay is RGBA16F, you'd use layout(rgba16f) in shader
    glBindImageTexture(0, inputTextureID, 0, GL_FALSE, 0, GL_READ_ONLY, GL_RGBA16); // Assuming display texture is RGBA8

    // Bind SSBO to binding point 1
    glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, g_histogramSSBO);

    // Calculate number of work groups
    GLuint workGroupSizeX = 16; // Must match layout in shader
    GLuint workGroupSizeY = 16;
    GLuint numGroupsX = (width + workGroupSizeX - 1) / workGroupSizeX;
    GLuint numGroupsY = (height + workGroupSizeY - 1) / workGroupSizeY;

    // Dispatch the compute shader
    glDispatchCompute(numGroupsX, numGroupsY, 1);

    // 3. Synchronization: Ensure compute shader writes finish before CPU reads buffer
    // Use a memory barrier on the SSBO writes
    glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);

    // Unbind resources (optional here, but good practice)
    glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, 0);
    glBindImageTexture(0, 0, 0, GL_FALSE, 0, GL_READ_ONLY, GL_RGBA16);
    glUseProgram(0);

    // 4. Read histogram data back from SSBO to CPU vector
    glBindBuffer(GL_SHADER_STORAGE_BUFFER, g_histogramSSBO);
    glGetBufferSubData(GL_SHADER_STORAGE_BUFFER, 0, HISTOGRAM_BUFFER_SIZE * sizeof(unsigned int), g_histogramDataCPU.data());
    glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0); // Unbind

    // 5. Find the maximum count for scaling the plot (optional, can be capped)
    g_histogramMaxCount = 255; // Reset to 255 (prevents div by zero)
    for (unsigned int count : g_histogramDataCPU) {
        if (count > g_histogramMaxCount) {
            g_histogramMaxCount = count;
        }
    }
    // Optional: Cap max count to prevent extreme peaks from flattening the rest
    // unsigned int capThreshold = (width * height) / 50; // e.g., cap at 2% of pixels
    // g_histogramMaxCount = std::min(g_histogramMaxCount, capThreshold);
    // if (g_histogramMaxCount == 0) g_histogramMaxCount = 1; // Ensure not zero after capping


    GLenum err = glGetError();
    if (err != GL_NO_ERROR) {
        fprintf(stderr, "OpenGL Error during histogram computation/readback: %u\n", err);
        // Optionally clear CPU data on error
         std::fill(g_histogramDataCPU.begin(), g_histogramDataCPU.end(), 0);
         g_histogramMaxCount = 1;
        printf("Histogram computation failed. Data cleared.\n");
    }
    else {
        printf("Histogram computed. Max count: %u\n", g_histogramMaxCount);
    }
}

// Add this function somewhere accessible, e.g., before main()

void DrawHistogramWidget(const char* widgetId, ImVec2 graphSize) {
    if (g_histogramDataCPU.empty() || g_histogramMaxCount <= 1) { // Check if data is valid
        if (g_histogramDataCPU.empty()) {
            ImGui::Text("Histogram data not initialized.");
        } else {
            ImGui::Text("Histogram data is empty or invalid.");
        }
        if (g_histogramMaxCount <= 1) {
            ImGui::Text("Histogram max count is invalid.");
        }
        ImGui::Text("Histogram data not available.");
        return;
    }

    ImGui::PushID(widgetId); // Isolate widget IDs

    ImDrawList* drawList = ImGui::GetWindowDrawList();
    const ImVec2 widgetPos = ImGui::GetCursorScreenPos();

    // Determine actual graph size (negative values mean use available space)
    if (graphSize.x <= 0.0f) graphSize.x = ImGui::GetContentRegionAvail().x;
    if (graphSize.y <= 0.0f) graphSize.y = 100.0f; // Default height

    // Draw background for the histogram area (optional)
    drawList->AddRectFilled(widgetPos, widgetPos + graphSize, IM_COL32(30, 30, 30, 200));

    // Calculate scaling factors
    float barWidth = graphSize.x / float(NUM_HISTOGRAM_BINS);
    float scaleY = graphSize.y / float(g_histogramMaxCount); // Scale based on max count

    // Define colors (with some transparency for overlap visibility)
    const ImU32 colR = IM_COL32(255, 0, 0, 180);
    const ImU32 colG = IM_COL32(0, 255, 0, 180);
    const ImU32 colB = IM_COL32(0, 0, 255, 180);

    // Draw the histogram bars (R, G, B)
    for (int i = 0; i < NUM_HISTOGRAM_BINS; ++i) {
        // Get heights (clamped to graph size)
        float hR = ImMin(float(g_histogramDataCPU[i]) * scaleY, graphSize.y);
        float hG = ImMin(float(g_histogramDataCPU[i + NUM_HISTOGRAM_BINS]) * scaleY, graphSize.y);
        float hB = ImMin(float(g_histogramDataCPU[i + NUM_HISTOGRAM_BINS * 2]) * scaleY, graphSize.y);

        // Calculate bar positions
        float x0 = widgetPos.x + float(i) * barWidth;
        float x1 = x0 + barWidth; // Use lines if bars are too thin, or thin rects
        float yBase = widgetPos.y + graphSize.y; // Bottom of the graph

        // Draw lines or thin rectangles (lines are often better for dense histograms)
        // Overlap/Blend: Draw B, then G, then R so Red is most prominent? Or use alpha blending.
        if (hB > 0) drawList->AddLine(ImVec2(x0 + barWidth * 0.5f, yBase), ImVec2(x0 + barWidth * 0.5f, yBase - hB), colB, 1.0f);
        if (hG > 0) drawList->AddLine(ImVec2(x0 + barWidth * 0.5f, yBase), ImVec2(x0 + barWidth * 0.5f, yBase - hG), colG, 1.0f);
        if (hR > 0) drawList->AddLine(ImVec2(x0 + barWidth * 0.5f, yBase), ImVec2(x0 + barWidth * 0.5f, yBase - hR), colR, 1.0f);

        // --- Alternative: Rectangles (might overlap heavily) ---
        // if (hB > 0) drawList->AddRectFilled(ImVec2(x0, yBase - hB), ImVec2(x1, yBase), colB);
        // if (hG > 0) drawList->AddRectFilled(ImVec2(x0, yBase - hG), ImVec2(x1, yBase), colG);
        // if (hR > 0) drawList->AddRectFilled(ImVec2(x0, yBase - hR), ImVec2(x1, yBase), colR);
    }

    // Draw border around the histogram area (optional)
    drawList->AddRect(widgetPos, widgetPos + graphSize, IM_COL32(150, 150, 150, 255));

    // Advance cursor past the histogram widget area
    ImGui::Dummy(graphSize);

    ImGui::PopID(); // Restore ID stack
}

// Main code
int main(int, char **)
{
    // Setup SDL
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER | SDL_INIT_GAMECONTROLLER) != 0)
    {
        printf("Error: %s\n", SDL_GetError());
        return -1;
    }

    // Decide GL+GLSL versions
#if defined(IMGUI_IMPL_OPENGL_ES2)
    // GL ES 2.0 + GLSL 100 (WebGL 1.0)
    const char *glsl_version = "#version 100";
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_FLAGS, 0);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 2);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);
#elif defined(IMGUI_IMPL_OPENGL_ES3)
    // GL ES 3.0 + GLSL 300 es (WebGL 2.0)
    const char *glsl_version = "#version 300 es";
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_FLAGS, 0);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);
#elif defined(__APPLE__)
    // GL 3.2 Core + GLSL 150
    const char *glsl_version = "#version 150";
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_FLAGS, SDL_GL_CONTEXT_FORWARD_COMPATIBLE_FLAG); // Always required on Mac
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 2);
#else
    // GL 3.0 + GLSL 130
    const char *glsl_version = "#version 130";
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_FLAGS, 0);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);
#endif

    // From 2.0.18: Enable native IME.
#ifdef SDL_HINT_IME_SHOW_UI
    SDL_SetHint(SDL_HINT_IME_SHOW_UI, "1");
#endif

    // Create window with graphics context
    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
    SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24);
    SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE, 8);
    SDL_WindowFlags window_flags = (SDL_WindowFlags)(SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE | SDL_WINDOW_ALLOW_HIGHDPI);
    SDL_Window *window = SDL_CreateWindow("tedit", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 1280, 720, window_flags);
    if (window == nullptr)
    {
        printf("Error: SDL_CreateWindow(): %s\n", SDL_GetError());
        return -1;
    }

    SDL_GLContext gl_context = SDL_GL_CreateContext(window);
    if (gl_context == nullptr)
    {
        printf("Error: SDL_GL_CreateContext(): %s\n", SDL_GetError());
        return -1;
    }

    SDL_GL_MakeCurrent(window, gl_context);
    SDL_GL_SetSwapInterval(1); // Enable vsync

    glewExperimental = GL_TRUE; // Needed for core profile
    GLenum err = glewInit();
    if (err != GLEW_OK)
    {
        fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
        return -1;
    }

    

    // Setup Dear ImGui context
    IMGUI_CHECKVERSION();
    ImGui::CreateContext();
    ImGuiIO &io = ImGui::GetIO();
    (void)io;
    io.ConfigFlags |= ImGuiConfigFlags_NavEnableKeyboard; // Enable Keyboard Controls
    io.ConfigFlags |= ImGuiConfigFlags_NavEnableGamepad;  // Enable Gamepad Controls
    io.ConfigFlags |= ImGuiConfigFlags_DockingEnable;     // Enable Docking
    // io.ConfigFlags |= ImGuiConfigFlags_ViewportsEnable;       // Enable Multi-Viewport / Platform Windows
    // io.ConfigViewportsNoAutoMerge = true;
    // io.ConfigViewportsNoTaskBarIcon = true;

    // Setup Dear ImGui style
    ImGui::StyleColorsDark();
    // ImGui::StyleColorsLight();

    // When viewports are enabled we tweak WindowRounding/WindowBg so platform windows can look identical to regular ones.
    ImGuiStyle &style = ImGui::GetStyle();
    if (io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable)
    {
        style.WindowRounding = 0.0f;
        style.Colors[ImGuiCol_WindowBg].w = 1.0f;
    }

    // Setup Platform/Renderer backends
    ImGui_ImplSDL2_InitForOpenGL(window, gl_context);
    ImGui_ImplOpenGL3_Init(glsl_version);

    // Our state
    ImVec4 clear_color = ImVec4(0.45f, 0.55f, 0.60f, 1.00f);

    g_openFileDialog.SetTitle("Open Image File");
    // Add common image formats and typical RAW formats
    g_openFileDialog.SetTypeFilters({
        ".jpg", ".jpeg", ".png", ".tif", ".tiff",                       // Standard formats
        ".arw", ".cr2", ".cr3", ".nef", ".dng", ".orf", ".raf", ".rw2", // Common RAW
        ".*"                                                            // Allow any file as fallback
    });

    g_exportSaveFileDialog.SetTitle("Export Image As");
    // Type filters for saving are less critical as we force the extension later,
    // but can be helpful for user navigation. Let's set a default.
    g_exportSaveFileDialog.SetTypeFilters({".jpg", ".png", ".tif"});

    AppImage g_loadedImage; // Your loaded image data
    bool g_imageIsLoaded = false;
    g_processedTextureId = 0; // Initialize processed texture ID
    printf("Initializing image processing pipeline...\n");
    g_pipeline.Init("shaders/"); // Assuming shaders are in shaders/ subdir

    ImGuiTexInspect::ImplOpenGL3_Init(); // Or DirectX 11 equivalent (check your chosen backend header file)
    ImGuiTexInspect::Init();
    ImGuiTexInspect::CreateContext();

    InitShaderOperations("shaders/"); // Initialize shader operations

    if (!InitHistogramResources("shaders/")) {
        // Handle error - maybe disable histogram feature
        fprintf(stderr, "Histogram initialization failed, feature disabled.\n");
    }

    // Main loop
    bool done = false;
#ifdef __EMSCRIPTEN__
    // For an Emscripten build we are disabling file-system access, so let's not attempt to do a fopen() of the imgui.ini file.
    // You may manually call LoadIniSettingsFromMemory() to load settings from your own storage.
    io.IniFilename = nullptr;
    EMSCRIPTEN_MAINLOOP_BEGIN
#else
    while (!done)
#endif
    {
        // Poll and handle events (inputs, window resize, etc.)
        // You can read the io.WantCaptureMouse, io.WantCaptureKeyboard flags to tell if dear imgui wants to use your inputs.
        // - When io.WantCaptureMouse is true, do not dispatch mouse input data to your main application, or clear/overwrite your copy of the mouse data.
        // - When io.WantCaptureKeyboard is true, do not dispatch keyboard input data to your main application, or clear/overwrite your copy of the keyboard data.
        // Generally you may always pass all inputs to dear imgui, and hide them from your application based on those two flags.
        SDL_Event event;
        while (SDL_PollEvent(&event))
        {
            ImGui_ImplSDL2_ProcessEvent(&event);
            if (event.type == SDL_QUIT)
                done = true;
            if (event.type == SDL_WINDOWEVENT && event.window.event == SDL_WINDOWEVENT_CLOSE && event.window.windowID == SDL_GetWindowID(window))
                done = true;
        }
        if (SDL_GetWindowFlags(window) & SDL_WINDOW_MINIMIZED)
        {
            SDL_Delay(10);
            continue;
        }

        // Start the Dear ImGui frame
        ImGui_ImplOpenGL3_NewFrame();
        ImGui_ImplSDL2_NewFrame();
        ImGui::NewFrame();

        GLuint textureToDisplay = 0; // Use a local var for clarity
        GLuint textureToSave = 0;    // Texture ID holding final linear data for saving
        if (g_imageIsLoaded && g_loadedImage.m_textureId != 0)
        {
            g_pipeline.inputColorSpace = g_inputColorSpace;
            g_pipeline.outputColorSpace = g_outputColorSpace;

            // Modify pipeline processing slightly to get both display and save textures
            // Add a flag or method to control output conversion for saving
            textureToSave = g_pipeline.ProcessImage(
                g_loadedImage.m_textureId,
                g_loadedImage.getWidth(),
                g_loadedImage.getHeight(),
                false // <-- Add argument: bool applyOutputConversion = true
            );
            textureToDisplay = g_pipeline.ProcessImage(
                g_loadedImage.m_textureId,
                g_loadedImage.getWidth(),
                g_loadedImage.getHeight(),
                true // Apply conversion for display
            );
            // If the pipeline wasn't modified, textureToSave might need extra work
        }
        else
        {
            textureToDisplay = 0;
            textureToSave = 0;
        }

        // --- Menu Bar ---
        if (ImGui::BeginMainMenuBar())
        {
            if (ImGui::BeginMenu("File"))
            {
                if (ImGui::MenuItem("Open...", "Ctrl+O"))
                {
                    g_openFileDialog.Open();
                }
                // Disable Export if no image is loaded
                if (ImGui::MenuItem("Export...", "Ctrl+E", false, g_imageIsLoaded))
                {
                    g_exportErrorMsg = "";     // Clear previous errors
                    g_showExportWindow = true; // <<< Set the flag to show the window
                }
                ImGui::Separator();
                if (ImGui::MenuItem("Exit"))
                {
                    done = true; // Simple exit for now
                }
                ImGui::EndMenu();
            }
            // ... other menus ...
            ImGui::EndMainMenuBar();
        }

        // --- File Dialog Display & Handling ---
        g_openFileDialog.Display();
        g_exportSaveFileDialog.Display();

        if (g_openFileDialog.HasSelected())
        {
            std::string selectedPath = g_openFileDialog.GetSelected().string();
            g_openFileDialog.ClearSelected();
            printf("Opening file: %s\n", selectedPath.c_str());

            // --- Load the selected image ---
            std::optional<AppImage> imgOpt = loadImage(selectedPath);
            if (imgOpt)
            {
                // If an image was already loaded, clean up its texture first
                if (g_loadedImage.m_textureId != 0)
                {
                    glDeleteTextures(1, &g_loadedImage.m_textureId);
                    g_loadedImage.m_textureId = 0;
                }
                // Clean up pipeline resources (FBOs/Textures) before loading new texture
                g_pipeline.ResetResources(); // <<< NEED TO ADD THIS METHOD

                g_loadedImage = std::move(*imgOpt);
                printf("Image loaded (%dx%d, %d channels, Linear:%s)\n",
                       g_loadedImage.getWidth(), g_loadedImage.getHeight(), g_loadedImage.getChannels(), g_loadedImage.isLinear() ? "Yes" : "No");

                if (loadImageTexture(g_loadedImage))
                {
                    g_imageIsLoaded = true;
                    g_currentFilePath = selectedPath; // Store path
                    printf("Float texture created successfully (ID: %u).\n", g_loadedImage.m_textureId);
                    // Maybe reset sliders/pipeline state? Optional.
                }
                else
                {
                    g_imageIsLoaded = false;
                    g_currentFilePath = "";
                    fprintf(stderr, "Failed to load image into GL texture.\n");
                    // TODO: Show error to user (e.g., modal popup)
                }
            }
            else
            {
                g_imageIsLoaded = false;
                g_currentFilePath = "";
                fprintf(stderr, "Failed to load image file: %s\n", selectedPath.c_str());
                // TODO: Show error to user
            }
        }

        if (g_showExportWindow) // <<< Only attempt to draw if flag is true
        {
            // Optional: Center the window the first time it appears
            ImGui::SetNextWindowSize(ImVec2(400, 0), ImGuiCond_Appearing); // Auto-height
            ImVec2 center = ImGui::GetMainViewport()->GetCenter();
            ImGui::SetNextWindowPos(center, ImGuiCond_Appearing, ImVec2(0.5f, 0.5f));

            // Begin a standard window. Pass &g_showExportWindow to enable the 'X' button.
            if (ImGui::Begin("Export Settings", &g_showExportWindow, ImGuiWindowFlags_AlwaysAutoResize))
            {
                ImGui::Text("Choose Export Format and Settings:");
                ImGui::Separator();

                // --- Format Selection ---
                ImGui::Text("Format:");
                ImGui::SameLine();
                // ... (Combo box logic for g_exportFormat remains the same) ...
                const char *formats[] = {"JPEG", "PNG (8-bit)", "PNG (16-bit)", "TIFF (8-bit)", "TIFF (16-bit)"};
                int currentFormatIndex = 0;
                switch (g_exportFormat)
                { /* ... map g_exportFormat to index ... */
                }
                if (ImGui::Combo("##ExportFormat", &currentFormatIndex, formats, IM_ARRAYSIZE(formats)))
                {
                    switch (currentFormatIndex)
                    { /* ... map index back to g_exportFormat ... */
                    }
                    g_exportErrorMsg = "";
                }

                // --- Format Specific Options ---
                if (g_exportFormat == ImageSaveFormat::JPEG)
                {
                    ImGui::SliderInt("Quality", &g_exportQuality, 1, 100);
                }
                else
                {
                    ImGui::Dummy(ImVec2(0.0f, ImGui::GetFrameHeightWithSpacing())); // Keep consistent height
                }
                ImGui::Separator();

                // --- Display Error Messages ---
                if (!g_exportErrorMsg.empty())
                {
                    ImGui::PushStyleColor(ImGuiCol_Text, ImVec4(1.0f, 0.2f, 0.2f, 1.0f));
                    ImGui::TextWrapped("Error: %s", g_exportErrorMsg.c_str());
                    ImGui::PopStyleColor();
                    ImGui::Separator();
                }

                // --- Action Buttons ---
                if (ImGui::Button("Save As...", ImVec2(120, 0)))
                {
                    // ... (Logic to set default name/path and call g_exportSaveFileDialog.Open() remains the same) ...
                    std::filesystem::path currentPath(g_currentFilePath);
                    std::string defaultName = currentPath.stem().string() + "_edited";
                    g_exportSaveFileDialog.SetPwd(currentPath.parent_path());
                    // g_exportSaveFileDialog.SetInputName(defaultName); // If supported
                    g_exportSaveFileDialog.Open();
                }
                ImGui::SameLine();
                // No need for an explicit Cancel button if the 'X' works, but can keep it:
                if (ImGui::Button("Cancel", ImVec2(120, 0)))
                {
                    g_showExportWindow = false; // Close the window by setting the flag
                }

            } // Matches ImGui::Begin("Export Settings",...)
            ImGui::End(); // IMPORTANT: Always call End() for Begin()

        } // End of if(g_showExportWindow)

        // --- Handle Export Save Dialog Selection ---
        if (g_exportSaveFileDialog.HasSelected())
        {
            // ... (Your existing logic to get path, correct extension) ...
            std::filesystem::path savePathFs = g_exportSaveFileDialog.GetSelected();
            g_exportSaveFileDialog.ClearSelected();
            std::string savePath = savePathFs.string();
            // ... (Ensure/correct extension logic) ...

            // --- Get Processed Image Data & Save ---
            printf("Attempting to save to: %s\n", savePath.c_str());
            g_exportErrorMsg = "";

            if (textureToSave != 0)
            {
                AppImage exportImageRGBA; // Name it clearly - it holds RGBA data
                printf("Reading back texture ID %u for saving...\n", textureToSave);
                if (ReadTextureToAppImage(textureToSave, g_loadedImage.getWidth(), g_loadedImage.getHeight(), exportImageRGBA))
                {
                    printf("Texture readback successful, saving...\n");
                    // <<< --- ADD CONVERSION LOGIC HERE --- >>>
                    bool saveResult = false;
                    if (g_exportFormat == ImageSaveFormat::JPEG)
                    {
                        // JPEG cannot handle 4 channels, convert to 3 (RGB)
                        if (exportImageRGBA.getChannels() == 4)
                        {
                            printf("JPEG selected: Converting 4-channel RGBA to 3-channel RGB...\n");
                            AppImage exportImageRGB(exportImageRGBA.getWidth(), exportImageRGBA.getHeight(), 3);
                            // Check allocation success? (Should be fine if RGBA worked)

                            const float *rgbaData = exportImageRGBA.getData();
                            float *rgbData = exportImageRGB.getData();
                            size_t numPixels = exportImageRGBA.getWidth() * exportImageRGBA.getHeight();

                            for (size_t i = 0; i < numPixels; ++i)
                            {
                                // Copy R, G, B; discard A
                                rgbData[i * 3 + 0] = rgbaData[i * 4 + 0]; // R
                                rgbData[i * 3 + 1] = rgbaData[i * 4 + 1]; // G
                                rgbData[i * 3 + 2] = rgbaData[i * 4 + 2]; // B
                            }
                            exportImageRGB.m_isLinear = exportImageRGBA.isLinear();                // Preserve linearity flag
                            exportImageRGB.m_colorSpaceName = exportImageRGBA.getColorSpaceName(); // Preserve colorspace info

                            printf("Conversion complete, saving RGB data...\n");
                            saveResult = saveImage(exportImageRGB, savePath, g_exportFormat, g_exportQuality);
                        }
                        else
                        {
                            // Source wasn't 4 channels? Unexpected, but save it directly.
                            printf("Warning: Expected 4 channels for JPEG conversion, got %d. Saving directly...\n", exportImageRGBA.getChannels());
                            saveResult = saveImage(exportImageRGBA, savePath, g_exportFormat, g_exportQuality);
                        }
                    }
                    else
                    {
                        // Format is PNG or TIFF, which should handle 4 channels (or 1/3)
                        printf("Saving image with original channels (%d) for PNG/TIFF...\n", exportImageRGBA.getChannels());
                        saveResult = saveImage(exportImageRGBA, savePath, g_exportFormat, g_exportQuality);
                    }
                    // <<< --- END CONVERSION LOGIC --- >>>
                    if (saveResult)
                    {
                        printf("Image saved successfully!\n");
                        g_showExportWindow = false; // <<< Close the settings window on success
                    }
                    else
                    {
                        fprintf(stderr, "Failed to save image.\n");
                        g_exportErrorMsg = "Failed to save image data to file.";
                    }
                }
                else
                {
                    fprintf(stderr, "Failed to read back texture data from GPU.\n");
                    g_exportErrorMsg = "Failed to read processed image data from GPU.";
                }
            }
            else
            {
                fprintf(stderr, "Cannot save: Invalid processed texture ID.\n");
                g_exportErrorMsg = "No valid processed image data available to save.";
            }
        }

        static bool use_dockspace = true;
        if (use_dockspace)
        {
            ImGuiViewport *viewport = ImGui::GetMainViewport();
            ImGuiID dockspace_id = ImGui::GetID("MyDockSpace");

            // Use DockSpaceOverViewport instead of creating a manual window
            // Set the viewport size for the dockspace node. This is important.
            ImGui::SetNextWindowPos(viewport->WorkPos);
            ImGui::SetNextWindowSize(viewport->WorkSize);
            ImGui::SetNextWindowViewport(viewport->ID);

            // Use PassthruCentralNode to make the central node background transparent
            // so the ImGui default background shows until a window is docked there.
            ImGuiDockNodeFlags dockspace_flags = ImGuiDockNodeFlags_PassthruCentralNode;

            // We wrap the DockSpace call in a window that doesn't really draw anything itself,
            // but is required by the DockBuilder mechanism to target the space.
            // Make it borderless, no title, etc.
            ImGuiWindowFlags host_window_flags = 0;
            host_window_flags |= ImGuiWindowFlags_NoTitleBar | ImGuiWindowFlags_NoCollapse | ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoMove;
            host_window_flags |= ImGuiWindowFlags_NoBringToFrontOnFocus | ImGuiWindowFlags_NoNavFocus;
            host_window_flags |= ImGuiWindowFlags_NoBackground; // Make the host window transparent

            ImGui::PushStyleVar(ImGuiStyleVar_WindowRounding, 0.0f);
            ImGui::PushStyleVar(ImGuiStyleVar_WindowBorderSize, 0.0f);
            ImGui::PushStyleVar(ImGuiStyleVar_WindowPadding, ImVec2(0.0f, 0.0f));
            ImGui::Begin("DockSpaceWindowHost", nullptr, host_window_flags); // No bool* needed
            ImGui::PopStyleVar(3);

            // Create the actual dockspace area.
            ImGui::DockSpace(dockspace_id, ImVec2(0.0f, 0.0f), dockspace_flags);

            ImGui::End(); // End the transparent host window

            // --- DockBuilder setup (runs once) ---
            // This logic remains the same, targeting the dockspace_id
            // Use DockBuilderGetNode()->IsEmpty() as a robust check for first time setup or reset.
            ImGuiDockNode *centralNode = ImGui::DockBuilderGetNode(dockspace_id);
            if (centralNode == nullptr || centralNode->IsEmpty())
            {
                printf("DockBuilder: Setting up initial layout for DockID %u\n", dockspace_id);
                ImGui::DockBuilderRemoveNode(dockspace_id); // Clear out any previous state
                ImGui::DockBuilderAddNode(dockspace_id, ImGuiDockNodeFlags_DockSpace);
                ImGui::DockBuilderSetNodeSize(dockspace_id, viewport->Size); // Set the size for the root node

                ImGuiID dock_main_id = dockspace_id; // This is the ID of the node just added
                ImGuiID dock_right_id, dock_left_id, dock_center_id;

                // Split right first (Edit Panel)
                ImGui::DockBuilderSplitNode(dock_main_id, ImGuiDir_Right, 0.25f, &dock_right_id, &dock_main_id);
                // Then split left from the remaining main area (Exif Panel)
                ImGui::DockBuilderSplitNode(dock_main_id, ImGuiDir_Left, 0.25f, &dock_left_id, &dock_center_id); // dock_center_id is the final remaining central node

                // Dock the windows into the nodes
                ImGui::DockBuilderDockWindow("Image Exif", dock_left_id);
                ImGui::DockBuilderDockWindow("Edit Image", dock_right_id);
                ImGui::DockBuilderDockWindow("Image View", dock_center_id); // Dock image view into the center

                ImGui::DockBuilderFinish(dockspace_id);
                printf("DockBuilder: Layout finished.\n");
            }
            // --- End DockBuilder setup ---

            // --- Now Begin the actual windows that get docked ---
            // These calls are now *outside* any manual container window.
            // They will find their place in the dockspace based on the DockBuilder setup or user interaction.

            // "Image View" window
            ImGui::Begin("Image View");
            // Display the texture that HAS the output conversion applied
            ImVec2 imageWidgetTopLeftScreen = ImGui::GetCursorScreenPos(); // Position BEFORE the inspector panel
            ImVec2 availableContentSize = ImGui::GetContentRegionAvail();  // Size available FOR the inspector panel

            GLuint displayTexId = textureToDisplay; // Use the display texture ID
            if (displayTexId != 0)
            {
                ComputeHistogramGPU(textureToDisplay, g_loadedImage.getWidth(), g_loadedImage.getHeight());
                // Assume ImGuiTexInspect fills available space. This might need adjustment.
                ImVec2 displaySize = availableContentSize;
                float displayAspect = displaySize.x / displaySize.y;
                float imageAspect = float(g_loadedImage.getWidth()) / float(g_loadedImage.getHeight());

                ImVec2 imageDisplaySize;                  // Actual size the image occupies on screen (letterboxed/pillarboxed)
                ImVec2 imageDisplayOffset = ImVec2(0, 0); // Offset within the widget area due to letterboxing

                if (displayAspect > imageAspect)
                { // Display is wider than image -> letterbox (bars top/bottom)
                    imageDisplaySize.y = displaySize.y;
                    imageDisplaySize.x = imageDisplaySize.y * imageAspect;
                    imageDisplayOffset.x = (displaySize.x - imageDisplaySize.x) * 0.5f;
                }
                else
                { // Display is taller than image (or same aspect) -> pillarbox (bars left/right)
                    imageDisplaySize.x = displaySize.x;
                    imageDisplaySize.y = imageDisplaySize.x / imageAspect;
                    imageDisplayOffset.y = (displaySize.y - imageDisplaySize.y) * 0.5f;
                }

                ImVec2 imageTopLeftScreen = imageWidgetTopLeftScreen + imageDisplayOffset;
                ImVec2 imageBottomRightScreen = imageTopLeftScreen + imageDisplaySize;
                // Use textureToDisplay here
                ImGuiTexInspect::BeginInspectorPanel("Image Inspector", (ImTextureID)(intptr_t)displayTexId,
                                                     ImVec2(g_loadedImage.m_width, g_loadedImage.m_height),
                                                     ImGuiTexInspect::InspectorFlags_NoTooltip |
                                                         ImGuiTexInspect::InspectorFlags_NoGrid |
                                                         ImGuiTexInspect::InspectorFlags_NoForceFilterNearest,
                                                     ImGuiTexInspect::SizeIncludingBorder(availableContentSize));
                ImGuiTexInspect::EndInspectorPanel();

                // --- Draw Crop Overlay If Active ---
                if (g_cropActive && g_imageIsLoaded)
                {
                    
                    ImDrawList *drawList = ImGui::GetForegroundDrawList();
                    ImGuiIO &io = ImGui::GetIO();
                    ImVec2 mousePos = io.MousePos;

                    // Calculate screen coords of the current crop rectangle
                    ImVec2 cropMinScreen = imageTopLeftScreen + ImVec2(g_cropRectNorm.x, g_cropRectNorm.y) * imageDisplaySize;
                    ImVec2 cropMaxScreen = imageTopLeftScreen + ImVec2(g_cropRectNorm.z, g_cropRectNorm.w) * imageDisplaySize;
                    ImVec2 cropSizeScreen = cropMaxScreen - cropMinScreen;

                    // Define handle size and interaction margin
                    float handleScreenSize = 8.0f;
                    float handleInteractionMargin = handleScreenSize * 1.5f; // Larger click area
                    ImU32 colRect = IM_COL32(255, 255, 255, 200);            // White rectangle
                    ImU32 colHandle = IM_COL32(255, 255, 255, 255);          // Solid white handle
                    ImU32 colGrid = IM_COL32(200, 200, 200, 100);            // Faint grid lines
                    ImU32 colHover = IM_COL32(255, 255, 0, 255);             // Yellow highlight

                    // --- Define Handle Positions (screen coordinates) ---
                    // Corners
                    ImVec2 tl = cropMinScreen;
                    ImVec2 tr = ImVec2(cropMaxScreen.x, cropMinScreen.y);
                    ImVec2 bl = ImVec2(cropMinScreen.x, cropMaxScreen.y);
                    ImVec2 br = cropMaxScreen;
                    // Mid-edges
                    ImVec2 tm = ImVec2((tl.x + tr.x) * 0.5f, tl.y);
                    ImVec2 bm = ImVec2((bl.x + br.x) * 0.5f, bl.y);
                    ImVec2 lm = ImVec2(tl.x, (tl.y + bl.y) * 0.5f);
                    ImVec2 rm = ImVec2(tr.x, (tr.y + br.y) * 0.5f);

                    // Handle definitions for hit testing and drawing
                    struct HandleDef
                    {
                        CropHandle id;
                        ImVec2 pos;
                    };
                    HandleDef handles[] = {
                        {CropHandle::TOP_LEFT, tl}, {CropHandle::TOP_RIGHT, tr}, {CropHandle::BOTTOM_LEFT, bl}, {CropHandle::BOTTOM_RIGHT, br}, {CropHandle::TOP, tm}, {CropHandle::BOTTOM, bm}, {CropHandle::LEFT, lm}, {CropHandle::RIGHT, rm}};

                    // --- Interaction Handling ---
                    bool isHoveringAnyHandle = false;
                    CropHandle hoveredHandle = CropHandle::NONE;

                    // Only interact if window is hovered
                    if (ImGui::IsWindowHovered()) // ImGuiHoveredFlags_AllowWhenBlockedByActiveItem might also be needed
                    {
                        // Check handles first (higher priority than inside rect)
                        for (const auto &h : handles)
                        {
                            ImRect handleRect(h.pos - ImVec2(handleInteractionMargin, handleInteractionMargin),
                                              h.pos + ImVec2(handleInteractionMargin, handleInteractionMargin));
                            if (handleRect.Contains(mousePos))
                            {
                                hoveredHandle = h.id;
                                isHoveringAnyHandle = true;
                                break;
                            }
                        }

                        // Check inside rect if no handle hovered
                        ImRect insideRect(cropMinScreen, cropMaxScreen);
                        if (!isHoveringAnyHandle && insideRect.Contains(mousePos))
                        {
                            hoveredHandle = CropHandle::INSIDE;
                        }

                        // Mouse Down: Start dragging
                        if (hoveredHandle != CropHandle::NONE && ImGui::IsMouseClicked(ImGuiMouseButton_Left))
                        {
                            g_activeCropHandle = hoveredHandle;
                            g_isDraggingCrop = true;
                            g_dragStartMousePos = mousePos;
                            g_cropRectNormInitial = g_cropRectNorm; // Store state at drag start
                            printf("Started dragging handle: %d\n", (int)g_activeCropHandle);
                        }
                    } // End IsWindowHovered check

                    // Mouse Drag: Update crop rectangle
                    if (g_isDraggingCrop && ImGui::IsMouseDragging(ImGuiMouseButton_Left))
                    {
                        ImVec2 mouseDeltaScreen = mousePos - g_dragStartMousePos;
                        // Convert delta to normalized image coordinates
                        ImVec2 mouseDeltaNorm = ImVec2(0, 0);
                        if (imageDisplaySize.x > 1e-3 && imageDisplaySize.y > 1e-3)
                        { // Avoid division by zero
                            mouseDeltaNorm = mouseDeltaScreen / imageDisplaySize;
                        }

                        // Update g_cropRectNorm based on handle and delta
                        // Store temporary rect to apply constraints later
                        ImVec4 tempRect = g_cropRectNormInitial; // Work from initial state + delta

                        // --- Update Logic (Needs Aspect Ratio Constraint Integration) ---
                        // [This part is complex - Simplified version below]
                        UpdateCropRect(tempRect, g_activeCropHandle, mouseDeltaNorm, g_cropAspectRatio);

                        // Clamp final rect to 0-1 range and ensure min < max
                        tempRect.x = ImClamp(tempRect.x, 0.0f, 1.0f);
                        tempRect.y = ImClamp(tempRect.y, 0.0f, 1.0f);
                        tempRect.z = ImClamp(tempRect.z, 0.0f, 1.0f);
                        tempRect.w = ImClamp(tempRect.w, 0.0f, 1.0f);
                        if (tempRect.x > tempRect.z)
                            ImSwap(tempRect.x, tempRect.z);
                        if (tempRect.y > tempRect.w)
                            ImSwap(tempRect.y, tempRect.w);
                        // Prevent zero size rect? (Optional)
                        // float minSizeNorm = 0.01f; // e.g., 1% minimum size
                        // if (tempRect.z - tempRect.x < minSizeNorm) tempRect.z = tempRect.x + minSizeNorm;
                        // if (tempRect.w - tempRect.y < minSizeNorm) tempRect.w = tempRect.y + minSizeNorm;

                        g_cropRectNorm = tempRect; // Update the actual state
                    }
                    else if (g_isDraggingCrop && ImGui::IsMouseReleased(ImGuiMouseButton_Left))
                    {
                        // Mouse Release: Stop dragging
                        g_isDraggingCrop = false;
                        g_activeCropHandle = CropHandle::NONE;
                        printf("Stopped dragging crop.\n");
                    }

                    // --- Drawing ---
                    // Dimming overlay (optional) - Draw 4 rects outside the crop area
                    drawList->AddRectFilled(imageTopLeftScreen, ImVec2(cropMinScreen.x, imageBottomRightScreen.y), IM_COL32(0,0,0,100)); // Left
                    drawList->AddRectFilled(ImVec2(cropMaxScreen.x, imageTopLeftScreen.y), imageBottomRightScreen, IM_COL32(0,0,0,100)); // Right
                    drawList->AddRectFilled(ImVec2(cropMinScreen.x, imageTopLeftScreen.y), ImVec2(cropMaxScreen.x, cropMinScreen.y), IM_COL32(0,0,0,100)); // Top
                    drawList->AddRectFilled(ImVec2(cropMinScreen.x, cropMaxScreen.y), ImVec2(cropMaxScreen.x, imageBottomRightScreen.y), IM_COL32(0,0,0,100)); // Bottom

                    // Draw crop rectangle outline
                    drawList->AddRect(cropMinScreen, cropMaxScreen, colRect, 0.0f, 0, 1.5f);

                    // Draw grid lines (simple 3x3 grid)
                    float thirdW = cropSizeScreen.x / 3.0f;
                    float thirdH = cropSizeScreen.y / 3.0f;
                    drawList->AddLine(ImVec2(cropMinScreen.x + thirdW, cropMinScreen.y), ImVec2(cropMinScreen.x + thirdW, cropMaxScreen.y), colGrid, 1.0f);
                    drawList->AddLine(ImVec2(cropMinScreen.x + thirdW * 2, cropMinScreen.y), ImVec2(cropMinScreen.x + thirdW * 2, cropMaxScreen.y), colGrid, 1.0f);
                    drawList->AddLine(ImVec2(cropMinScreen.x, cropMinScreen.y + thirdH), ImVec2(cropMaxScreen.x, cropMinScreen.y + thirdH), colGrid, 1.0f);
                    drawList->AddLine(ImVec2(cropMinScreen.x, cropMinScreen.y + thirdH * 2), ImVec2(cropMaxScreen.x, cropMinScreen.y + thirdH * 2), colGrid, 1.0f);

                    // Draw handles
                    for (const auto &h : handles)
                    {
                        bool isHovered = (h.id == hoveredHandle);
                        bool isActive = (h.id == g_activeCropHandle);
                        drawList->AddRectFilled(h.pos - ImVec2(handleScreenSize / 2, handleScreenSize / 2),
                                                h.pos + ImVec2(handleScreenSize / 2, handleScreenSize / 2),
                                                (isHovered || isActive) ? colHover : colHandle);
                    }
                } // End if(g_cropActive)
            }
            else
            {
                // Show placeholder text if no image is loaded
                ImVec2 winSize = ImGui::GetWindowSize();
                ImVec2 textSize = ImGui::CalcTextSize("No Image Loaded");
                ImGui::SetCursorPos(ImVec2((winSize.x - textSize.x) * 0.5f, (winSize.y - textSize.y) * 0.5f));
                ImGui::Text("No Image Loaded. File -> Open... to load an image");
                std::fill(g_histogramDataCPU.begin(), g_histogramDataCPU.end(), 0);
              g_histogramMaxCount = 1;
                // Or maybe: "File -> Open... to load an image"
            }
            ImGui::End(); // End Image View

            // "Image Exif" window
            ImGui::Begin("Image Exif");
            if (g_imageIsLoaded)
            {
                ImGui::Text("Image Width: %d", g_loadedImage.m_width);
                ImGui::Text("Image Height: %d", g_loadedImage.m_height);
                ImGui::Text("Image Loaded: %s", g_imageIsLoaded ? "Yes" : "No");
                ImGui::Text("Image Channels: %d", g_loadedImage.m_channels);
                ImGui::Text("Image Color Space: %s", g_loadedImage.m_colorSpaceName.c_str());
                ImGui::Text("Image ICC Profile Size: %zu bytes", g_loadedImage.m_iccProfile.size());
                ImGui::Text("Image Metadata Size: %zu bytes", g_loadedImage.m_metadata.size());
                ImGui::Separator();
                ImGui::Text("Image Metadata: ");
                for (const auto &entry : g_loadedImage.m_metadata)
                {
                    ImGui::Text("%s: %s", entry.first.c_str(), entry.second.c_str());
                }
            } // Closing the if statement for g_imageIsLoaded
            ImGui::End(); // End Image Exif

            // "Edit Image" window
            ImGui::Begin("Edit Image");

            if (ImGui::CollapsingHeader("Histogram", ImGuiTreeNodeFlags_DefaultOpen)) {
                DrawHistogramWidget("ExifHistogram", ImVec2(-1, 256));
            }

            // --- Edit Image (Right) ---
            ImGui::Begin("Edit Image");

            // --- Pipeline Configuration ---
            ImGui::SeparatorText("Processing Pipeline");

            // Input Color Space Selector
            ImGui::Text("Input Color Space:");
            ImGui::SameLine();
            if (ImGui::BeginCombo("##InputCS", ColorSpaceToString(g_inputColorSpace)))
            {
                if (ImGui::Selectable(ColorSpaceToString(ColorSpace::LINEAR_SRGB), g_inputColorSpace == ColorSpace::LINEAR_SRGB))
                {
                    g_inputColorSpace = ColorSpace::LINEAR_SRGB;
                }
                if (ImGui::Selectable(ColorSpaceToString(ColorSpace::SRGB), g_inputColorSpace == ColorSpace::SRGB))
                {
                    g_inputColorSpace = ColorSpace::SRGB;
                }
                // Add other spaces later
                ImGui::EndCombo();
            }

            // Output Color Space Selector
            ImGui::Text("Output Color Space:");
            ImGui::SameLine();
            if (ImGui::BeginCombo("##OutputCS", ColorSpaceToString(g_outputColorSpace)))
            {
                if (ImGui::Selectable(ColorSpaceToString(ColorSpace::LINEAR_SRGB), g_outputColorSpace == ColorSpace::LINEAR_SRGB))
                {
                    g_outputColorSpace = ColorSpace::LINEAR_SRGB;
                }
                if (ImGui::Selectable(ColorSpaceToString(ColorSpace::SRGB), g_outputColorSpace == ColorSpace::SRGB))
                {
                    g_outputColorSpace = ColorSpace::SRGB;
                }
                // Add other spaces later
                ImGui::EndCombo();
            }

            ImGui::Separator();
            ImGui::Text("Operation Order:");

            // Drag-and-Drop Reordering List
            // Store indices or pointers to allow reordering `g_pipeline.activeOperations`
            int move_from = -1, move_to = -1;
            for (int i = 0; i < g_pipeline.activeOperations.size(); ++i)
            {
                PipelineOperation &op = g_pipeline.activeOperations[i];

                ImGui::PushID(i); // Ensure unique IDs for controls within the loop

                // Checkbox to enable/disable
                ImGui::Checkbox("", &op.enabled);
                ImGui::SameLine();

                // Simple Up/Down Buttons (alternative or complementary to DND)
                
                if (ImGui::ArrowButton("##up", ImGuiDir_Up) && i > 0)
                {
                    move_from = i;
                    move_to = i - 1;
                }
                ImGui::SameLine();
                if (ImGui::ArrowButton("##down", ImGuiDir_Down) && i < g_pipeline.activeOperations.size() - 1)
                {
                    move_from = i;
                    move_to = i + 1;
                }
                ImGui::SameLine();
                

                // Selectable for drag/drop source/target
                ImGui::Selectable(op.name.c_str(), false, 0, ImVec2(ImGui::GetContentRegionAvail().x - 30, 0)); // Leave space for buttons

                // Simple Drag Drop implementation
                if (ImGui::BeginDragDropSource(ImGuiDragDropFlags_None))
                {
                    ImGui::SetDragDropPayload("PIPELINE_OP_DND", &i, sizeof(int));
                    ImGui::Text("Move %s", op.name.c_str());
                    ImGui::EndDragDropSource();
                }
                if (ImGui::BeginDragDropTarget())
                {
                    if (const ImGuiPayload *payload = ImGui::AcceptDragDropPayload("PIPELINE_OP_DND"))
                    {
                        IM_ASSERT(payload->DataSize == sizeof(int));
                        move_from = *(const int *)payload->Data;
                        move_to = i;
                    }
                    ImGui::EndDragDropTarget();
                }


                ImGui::PopID();
            }

            // Process move if detected
            if (move_from != -1 && move_to != -1 && move_from != move_to)
            {
                PipelineOperation temp = g_pipeline.activeOperations[move_from];
                g_pipeline.activeOperations.erase(g_pipeline.activeOperations.begin() + move_from);
                g_pipeline.activeOperations.insert(g_pipeline.activeOperations.begin() + move_to, temp);
                printf("Moved operation %d to %d\n", move_from, move_to);
            }

            ImGui::SeparatorText("Adjustments");

            // --- Adjustment Controls ---
            // Group sliders under collapsing headers as before
            // The slider variables (exposure, contrast, etc.) are now directly
            // linked to the PipelineOperation structs via pointers.
            if (ImGui::CollapsingHeader("White Balance", ImGuiTreeNodeFlags_DefaultOpen))
            {
                ImGui::SliderFloat("Temperature", &temperature, 1000.0f, 20000.0f);
                ImGui::SliderFloat("Tint", &tint, -100.0f, 100.0f);
            }
            ImGui::Separator();
            if (ImGui::CollapsingHeader("Tone", ImGuiTreeNodeFlags_DefaultOpen))
            {
                ImGui::SliderFloat("Exposure", &exposure, -5.0f, 5.0f, "%.1f", ImGuiSliderFlags_Logarithmic);
                ImGui::SliderFloat("Contrast", &contrast, -5.0f, 5.0f, "%.1f", ImGuiSliderFlags_Logarithmic);
                ImGui::Separator();
                ImGui::SliderFloat("Highlights", &highlights, -100.0f, 100.0f);
                ImGui::SliderFloat("Shadows", &shadows, -100.0f, 100.0f);
                ImGui::SliderFloat("Whites", &whites, -100.0f, 100.0f);
                ImGui::SliderFloat("Blacks", &blacks, -100.0f, 100.0f);
            }
            ImGui::Separator();
            if (ImGui::CollapsingHeader("Presence", ImGuiTreeNodeFlags_DefaultOpen))
            {
                ImGui::SliderFloat("Texture", &texture, -100.0f, 100.0f);
                ImGui::SliderFloat("Clarity", &clarity, -100.0f, 100.0f);
                ImGui::SliderFloat("Dehaze", &dehaze, -100.0f, 100.0f);
                ImGui::Separator();
                ImGui::SliderFloat("Vibrance", &vibrance, -100.0f, 100.0f);
                ImGui::SliderFloat("Saturation", &saturation, -100.0f, 100.0f);
            }
            ImGui::Separator();

            ImGui::SeparatorText("Transform");

            if (!g_cropActive)
            {
                if (ImGui::Button("Crop & Straighten"))
                { // Combine visually for now
                    g_cropActive = true;
                    g_cropRectNorm = ImVec4(0.0f, 0.0f, 1.0f, 1.0f); // Reset crop on activation
                    g_cropRectNormInitial = g_cropRectNorm;          // Store initial state
                    g_activeCropHandle = CropHandle::NONE;
                    g_isDraggingCrop = false;

                    // Update Original aspect ratio if needed
                    if (g_loadedImage.getHeight() > 0)
                    {
                        for (auto &opt : g_aspectRatios)
                        {
                            if (strcmp(opt.name, "Original") == 0)
                            {
                                opt.ratio = float(g_loadedImage.getWidth()) / float(g_loadedImage.getHeight());
                                break;
                            }
                        }
                    }
                    // If current selection is 'Original', update g_cropAspectRatio
                    if (g_selectedAspectRatioIndex >= 0 && g_selectedAspectRatioIndex < g_aspectRatios.size() &&
                        strcmp(g_aspectRatios[g_selectedAspectRatioIndex].name, "Original") == 0)
                    {
                        g_cropAspectRatio = g_aspectRatios[g_selectedAspectRatioIndex].ratio;
                    }

                    printf("Crop tool activated.\n");
                }
            }
            else
            {
                ImGui::Text("Crop Active");

                // Aspect Ratio Selector
                if (ImGui::BeginCombo("Aspect Ratio", g_aspectRatios[g_selectedAspectRatioIndex].name))
                {
                    for (int i = 0; i < g_aspectRatios.size(); ++i)
                    {
                        bool is_selected = (g_selectedAspectRatioIndex == i);
                        if (ImGui::Selectable(g_aspectRatios[i].name, is_selected))
                        {
                            g_selectedAspectRatioIndex = i;
                            g_cropAspectRatio = g_aspectRatios[i].ratio;
                            // Optional: Reset crop rectangle slightly or adjust existing one
                            // to the new ratio if transitioning from freeform? Or just let user resize.
                            printf("Selected aspect ratio: %s (%.2f)\n", g_aspectRatios[i].name, g_cropAspectRatio);
                        }
                        if (is_selected)
                            ImGui::SetItemDefaultFocus();
                    }
                    ImGui::EndCombo();
                }

                // Apply/Cancel Buttons
                if (ImGui::Button("Apply Crop"))
                {
                    printf("Apply Crop button clicked.\n");
                    // <<< --- CALL FUNCTION TO APPLY CROP --- >>>
                    if (ApplyCropToImage(g_loadedImage, g_cropRectNorm))
                    {
                        printf("Crop applied successfully. Reloading texture and resetting pipeline.\n");
                        // Reload texture with cropped data
                        if (!loadImageTexture(g_loadedImage))
                        {
                            fprintf(stderr, "Error reloading texture after crop!\n");
                            g_imageIsLoaded = false; // Mark as not usable
                        }
                        // Reset pipeline FBOs/Textures due to size change
                        g_pipeline.ResetResources();
                    }
                    else
                    {
                        fprintf(stderr, "Failed to apply crop to image data.\n");
                        // Optionally show error to user
                    }
                    // Reset state after applying
                    g_cropActive = false;
                    g_cropRectNorm = ImVec4(0.0f, 0.0f, 1.0f, 1.0f);
                    g_activeCropHandle = CropHandle::NONE;
                    g_isDraggingCrop = false;
                }
                ImGui::SameLine();
                if (ImGui::Button("Cancel Crop"))
                {
                    printf("Crop cancelled.\n");
                    g_cropActive = false;
                    g_cropRectNorm = ImVec4(0.0f, 0.0f, 1.0f, 1.0f); // Reset to full image
                    g_activeCropHandle = CropHandle::NONE;
                    g_isDraggingCrop = false;
                }
            }

            ImGui::End(); // End Edit Image

            ImGui::End(); // End MainDockspaceWindow
        }
        else
        {
            // Option 2: Simple full-screen window (no docking)
            ImGuiViewport *viewport = ImGui::GetMainViewport();
            ImGui::SetNextWindowPos(viewport->WorkPos);
            ImGui::SetNextWindowSize(viewport->WorkSize);
            ImGuiWindowFlags window_flags = ImGuiWindowFlags_NoDecoration | ImGuiWindowFlags_NoMove | ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoSavedSettings | ImGuiWindowFlags_NoBringToFrontOnFocus;
            ImGui::Begin("FullImageViewer", nullptr, window_flags);
            ImGui::Text("Image Viewer");
            ImGuiTexInspect::BeginInspectorPanel("Image Inspector", g_loadedImage.m_textureId, ImVec2(g_loadedImage.m_width, g_loadedImage.m_height), ImGuiTexInspect::InspectorFlags_NoTooltip);
            ImGuiTexInspect::EndInspectorPanel();
            ImGui::End();
        }

        // Rendering
        ImGui::Render();
        glViewport(0, 0, (int)io.DisplaySize.x, (int)io.DisplaySize.y);
        glClearColor(clear_color.x * clear_color.w, clear_color.y * clear_color.w, clear_color.z * clear_color.w, clear_color.w);
        glClear(GL_COLOR_BUFFER_BIT);
        ImGui_ImplOpenGL3_RenderDrawData(ImGui::GetDrawData());

        // Update and Render additional Platform Windows
        // (Platform functions may change the current OpenGL context, so we save/restore it to make it easier to paste this code elsewhere.
        //  For this specific demo app we could also call SDL_GL_MakeCurrent(window, gl_context) directly)
        if (io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable)
        {
            SDL_Window *backup_current_window = SDL_GL_GetCurrentWindow();
            SDL_GLContext backup_current_context = SDL_GL_GetCurrentContext();
            ImGui::UpdatePlatformWindows();
            ImGui::RenderPlatformWindowsDefault();
            SDL_GL_MakeCurrent(backup_current_window, backup_current_context);
        }

        SDL_GL_SwapWindow(window);
    }
#ifdef __EMSCRIPTEN__
    EMSCRIPTEN_MAINLOOP_END;
#endif

    // Cleanup
    // --- Cleanup ---
    // Destroy operations which will delete shader programs
    g_allOperations.clear();             // Deletes PipelineOperation objects and their shaders
    g_pipeline.activeOperations.clear(); // Clear the list in pipeline (doesn't own shaders)
    // Pipeline destructor handles FBOs/VAO etc.

    // Delete the originally loaded texture
    if (g_loadedImage.m_textureId != 0)
    {
        glDeleteTextures(1, &g_loadedImage.m_textureId);
        g_loadedImage.m_textureId = 0;
    }

    if (g_histogramResourcesInitialized) {
        if (g_histogramSSBO) glDeleteBuffers(1, &g_histogramSSBO);
        if (g_histogramComputeShader) glDeleteProgram(g_histogramComputeShader);
         printf("Cleaned up histogram resources.\n");
    }

    ImGuiTexInspect::Shutdown();

    ImGui_ImplOpenGL3_Shutdown();
    ImGui_ImplSDL2_Shutdown();
    ImGui::DestroyContext();

    SDL_GL_DeleteContext(gl_context);
    SDL_DestroyWindow(window);
    SDL_Quit();

    return 0;
}

app_image.h

#ifndef APP_IMAGE_H
#define APP_IMAGE_H

#include <vector>
#include <string>
#include <map>
#include <optional> // Requires C++17
#include <memory>
#include <cstdint>
#include <cmath>
#include <fstream>
#include <stdexcept>
#include <algorithm>
#include <iostream> // For errors/warnings
#include <cstring>  // For strcmp, memcpy, etc.
#include <setjmp.h> // For libjpeg/libpng error handling
#define IMGUI_DEFINE_MATH_OPERATORS // Allows ImVec2 operators
#include "imgui_internal.h" // Need ImFloorSigned, ImClamp, ImMax, ImMin, ImAbs

// --- User Instructions ---
// 1. Place easyexif.h in your include path.
// 2. Ensure development libraries for LibRaw, libjpeg-turbo, libpng, and libtiff are installed.
// 3. In EXACTLY ONE .cpp file in your project, before including this header, define:
//    #define APP_IMAGE_IMPLEMENTATION
// 4. When compiling, LINK against the necessary libraries, e.g., using CMake or directly:
//    g++ your_app.cpp -o your_app -std=c++17 -lraw -ljpeg -lpng -ltiff -lm (order might matter)

// --- Forward declarations of external library types (optional, mostly for clarity) ---
// struct jpeg_decompress_struct;
// struct jpeg_compress_struct;
// struct jpeg_error_mgr;
// struct png_struct_def;
// struct png_info_def;
// typedef struct tiff TIFF; // From tiffio.h
// class LibRaw; // From libraw/libraw.h

// Include easyexif here as it's header-only anyway
#include "exif.h"

// Enum for specifying save formats
enum class ImageSaveFormat
{
    JPEG,    // Quality setting applies (1-100), saves as 8-bit sRGB.
    PNG_8,   // 8-bit PNG (sRGB assumption).
    PNG_16,  // 16-bit PNG (Linear or sRGB depends on future implementation details, currently linear).
    TIFF_8,  // 8-bit TIFF (Uncompressed, RGB).
    TIFF_16, // 16-bit TIFF (Uncompressed, RGB, Linear).
    // TIFF_LZW_16 // Example for compressed TIFF
    UNKNOWN
};

// Basic structure for image metadata (can hold EXIF tags)
using ImageMetadata = std::map<std::string, std::string>;

// --- App Internal Image Representation ---
class AppImage
{
public:
    // --- Constructors ---
    AppImage() = default;
    AppImage(uint32_t width, uint32_t height, uint32_t channels = 3);

    // --- Accessors ---
    uint32_t getWidth() const { return m_width; }
    uint32_t getHeight() const { return m_height; }
    uint32_t getChannels() const { return m_channels; }
    bool isEmpty() const { return m_pixelData.empty(); }

    // Pixel data: Linear floating point [0.0, 1.0+], interleaved RGB/RGBA/Gray.
    float *getData() { return m_pixelData.data(); }
    const float *getData() const { return m_pixelData.data(); }
    size_t getDataSize() const { return m_pixelData.size() * sizeof(float); }
    size_t getTotalFloats() const { return m_pixelData.size(); }

    std::vector<float> &getPixelVector() { return m_pixelData; }
    const std::vector<float> &getPixelVector() const { return m_pixelData; }

    // --- Metadata ---
    ImageMetadata &getMetadata() { return m_metadata; }
    const ImageMetadata &getMetadata() const { return m_metadata; }

    // --- Color Information ---
    std::vector<uint8_t> &getIccProfile() { return m_iccProfile; }
    const std::vector<uint8_t> &getIccProfile() const { return m_iccProfile; }

    std::string &getColorSpaceName() { return m_colorSpaceName; }
    const std::string &getColorSpaceName() const { return m_colorSpaceName; }
    bool isLinear() const { return m_isLinear; }

    // --- Modifiers ---
    void resize(uint32_t newWidth, uint32_t newHeight, uint32_t newChannels = 0);
    void clear_image();

    // --- Data members ---
    // Making them public for easier access in the implementation section below,
    // alternatively make loadImage/saveImage friends or add internal setters.
    // public:
    uint32_t m_width = 0;
    uint32_t m_height = 0;
    uint32_t m_channels = 0; // 1=Gray, 3=RGB, 4=RGBA

    std::vector<float> m_pixelData;
    ImageMetadata m_metadata;
    std::vector<uint8_t> m_iccProfile;
    std::string m_colorSpaceName = "Unknown";
    bool m_isLinear = true; // Default assumption for internal format
    GLuint m_textureId = 0;
    int m_textureWidth = 0;
    int m_textureHeight = 0;
};

// --- API Function Declarations ---

/**
 * @brief Loads an image file, attempting type detection (RAW, JPEG, PNG, TIFF).
 * Uses LibRaw, libjpeg-turbo, libpng, libtiff.
 * Uses EasyExif for EXIF metadata from JPEGs (only).
 * Converts loaded pixel data to internal linear float format.
 * Extracts ICC profile if available (primarily from RAW).
 *
 * @param filePath Path to the image file.
 * @return std::optional<AppImage> containing the loaded image on success, std::nullopt on failure.
 */
std::optional<AppImage> loadImage(const std::string &filePath);

/**
 * @brief Saves the AppImage to a file (JPEG, PNG, TIFF).
 * Uses libjpeg-turbo, libpng, libtiff.
 * Converts internal linear float data to target format (e.g., 8-bit sRGB for JPEG).
 * NOTE: Does NOT currently save EXIF or ICC metadata. This requires more complex handling
 *       (e.g., using Exiv2 library or manual file manipulation after saving pixels).
 *
 * @param image The AppImage to save. Assumed to be in linear float format.
 * @param filePath Path to save the image file.
 * @param format The desired output format.
 * @param quality JPEG quality (1-100), ignored otherwise.
 * @return True on success, false on failure.
 */
bool saveImage(const AppImage &image,
               const std::string &filePath,
               ImageSaveFormat format,
               int quality = 90);

bool loadImageTexture(const AppImage &appImage);

// ============================================================================
// =================== IMPLEMENTATION SECTION =================================
// ============================================================================
// Define APP_IMAGE_IMPLEMENTATION in exactly one .cpp file before including this header

#ifdef APP_IMAGE_IMPLEMENTATION

#include <libraw/libraw.h>
#include <jpeglib.h>
#include <png.h>
#include <tiffio.h>

// Internal helper namespace
namespace AppImageUtil
{

    // --- Error Handling ---
    // Basic error reporting (prints to stderr)
    inline void LogError(const std::string &msg)
    {
        std::cerr << "AppImage Error: " << msg << std::endl;
    }
    inline void LogWarning(const std::string &msg)
    {
        std::cerr << "AppImage Warning: " << msg << std::endl;
    }

    // --- Color Conversion Helpers (Approximate sRGB) ---
    // For critical work, use a color management library (LittleCMS) and proper piecewise functions
    inline float srgb_to_linear_approx(float srgbVal)
    {
        if (srgbVal <= 0.0f)
            return 0.0f;
        if (srgbVal <= 0.04045f)
        {
            return srgbVal / 12.92f;
        }
        else
        {
            return std::pow((srgbVal + 0.055f) / 1.055f, 2.4f);
        }
    }

    inline float linear_to_srgb_approx(float linearVal)
    {
        if (linearVal <= 0.0f)
            return 0.0f;
        // Simple clamp for typical display output
        linearVal = std::fmax(0.0f, std::fmin(1.0f, linearVal));
        if (linearVal <= 0.0031308f)
        {
            return linearVal * 12.92f;
        }
        else
        {
            return 1.055f * std::pow(linearVal, 1.0f / 2.4f) - 0.055f;
        }
    }

    // --- File Type Detection ---
    enum class DetectedFileType
    {
        RAW,
        JPEG,
        PNG,
        TIFF,
        UNKNOWN
    };

    inline DetectedFileType detectFileType(const std::string &filePath)
    {
        std::ifstream file(filePath, std::ios::binary);
        if (!file)
            return DetectedFileType::UNKNOWN;

        unsigned char magic[12]; // Read enough bytes for common signatures
        file.read(reinterpret_cast<char *>(magic), sizeof(magic));
        if (!file)
            return DetectedFileType::UNKNOWN;

        // Check common signatures
        if (magic[0] == 0xFF && magic[1] == 0xD8 && magic[2] == 0xFF)
            return DetectedFileType::JPEG;
        if (magic[0] == 0x89 && magic[1] == 'P' && magic[2] == 'N' && magic[3] == 'G')
            return DetectedFileType::PNG;
        if ((magic[0] == 'I' && magic[1] == 'I' && magic[2] == 0x2A && magic[3] == 0x00) || // Little-endian TIFF
            (magic[0] == 'M' && magic[1] == 'M' && magic[2] == 0x00 && magic[3] == 0x2A))   // Big-endian TIFF
        {

            size_t dotPos = filePath.rfind('.');
            if (dotPos != std::string::npos)
            {
                std::string ext = filePath.substr(dotPos);
                std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

                // Common RAW formats that use TIFF structure
                const char *rawTiffExtensions[] = {
                    ".nef", // Nikon
                    ".cr2", // Canon
                    ".dng", // Adobe/Various
                    ".arw", // Sony
                    ".srw", // Samsung
                    ".orf", // Olympus
                    ".pef", // Pentax
                    ".raf", // Fuji
                    ".rw2"  // Panasonic
                };

                for (const char *rawExt : rawTiffExtensions)
                {
                    if (ext == rawExt)
                        return DetectedFileType::RAW;
                }
            }
            return DetectedFileType::TIFF;
        }

        // If no standard signature matches, check extension for RAW as a fallback
        // (LibRaw handles many internal variations)
        size_t dotPos = filePath.rfind('.');
        if (dotPos != std::string::npos)
        {
            std::string ext = filePath.substr(dotPos);
            std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
            const char *rawExtensions[] = {
                ".3fr", ".ari", ".arw", ".bay", ".braw", ".crw", ".cr2", ".cr3", ".cap",
                ".data", ".dcs", ".dcr", ".dng", ".drf", ".eip", ".erf", ".fff", ".gpr",
                ".iiq", ".k25", ".kdc", ".mdc", ".mef", ".mos", ".mrw", ".nef", ".nrw",
                ".obm", ".orf", ".pef", ".ptx", ".pxn", ".r3d", ".raf", ".raw", ".rwl",
                ".rw2", ".rwz", ".sr2", ".srf", ".srw", ".tif", ".x3f" // Note: .tif can be RAW or regular TIFF
            };
            for (const char *rawExt : rawExtensions)
            {
                if (ext == rawExt)
                    return DetectedFileType::RAW;
            }
            // Special case: Leica .dng can also be loaded by LibRaw
            if (ext == ".dng")
                return DetectedFileType::RAW;
        }

        return DetectedFileType::UNKNOWN;
    }

    // --- EXIF Loading Helper (using EasyExif) ---
    inline void loadExifData(const std::string &filePath, ImageMetadata &metadata)
    {
        std::ifstream file(filePath, std::ios::binary | std::ios::ate);
        if (!file)
            return;
        std::streamsize size = file.tellg();
        file.seekg(0, std::ios::beg);
        std::vector<unsigned char> buffer(size);
        if (!file.read(reinterpret_cast<char *>(buffer.data()), size))
            return;

        easyexif::EXIFInfo exifInfo;
        int code = exifInfo.parseFrom(buffer.data(), buffer.size());
        if (code == 0)
        {
            // Helper lambda to add if not empty
            auto addMeta = [&](const std::string &key, const std::string &value)
            {
                if (!value.empty())
                    metadata[key] = value;
            };
            auto addMetaInt = [&](const std::string &key, int value)
            {
                if (value > 0)
                    metadata[key] = std::to_string(value);
            };
            auto addMetaDouble = [&](const std::string &key, double value)
            {
                if (value > 0)
                    metadata[key] = std::to_string(value);
            };

            addMeta("Exif.Image.Make", exifInfo.Make);
            addMeta("Exif.Image.Model", exifInfo.Model);
            addMeta("Exif.Image.Software", exifInfo.Software);
            addMetaInt("Exif.Image.Orientation", exifInfo.Orientation);
            addMeta("Exif.Image.DateTime", exifInfo.DateTime);
            addMeta("Exif.Photo.DateTimeOriginal", exifInfo.DateTimeOriginal);
            addMeta("Exif.Photo.DateTimeDigitized", exifInfo.DateTimeDigitized);
            addMeta("Exif.Image.SubSecTimeOriginal", exifInfo.SubSecTimeOriginal); // Often empty
            addMeta("Exif.Image.Copyright", exifInfo.Copyright);
            addMetaDouble("Exif.Photo.ExposureTime", exifInfo.ExposureTime);
            addMetaDouble("Exif.Photo.FNumber", exifInfo.FNumber);
            addMetaInt("Exif.Photo.ISOSpeedRatings", exifInfo.ISOSpeedRatings);
            addMetaDouble("Exif.Photo.ShutterSpeedValue", exifInfo.ShutterSpeedValue); // APEX
            addMetaDouble("Exif.Photo.ApertureValue", exifInfo.FNumber);               // APEX
            addMetaDouble("Exif.Photo.ExposureBiasValue", exifInfo.ExposureBiasValue);
            addMetaDouble("Exif.Photo.FocalLength", exifInfo.FocalLength);
            addMeta("Exif.Photo.LensModel", exifInfo.LensInfo.Model);
            // GeoLocation
            if (exifInfo.GeoLocation.Latitude != 0 || exifInfo.GeoLocation.Longitude != 0)
            {
                metadata["Exif.GPSInfo.Latitude"] = std::to_string(exifInfo.GeoLocation.Latitude);
                metadata["Exif.GPSInfo.Longitude"] = std::to_string(exifInfo.GeoLocation.Longitude);
                metadata["Exif.GPSInfo.Altitude"] = std::to_string(exifInfo.GeoLocation.Altitude);
                metadata["Exif.GPSInfo.LatitudeRef"] = exifInfo.GeoLocation.LatComponents.direction;
                metadata["Exif.GPSInfo.LongitudeRef"] = exifInfo.GeoLocation.LonComponents.direction;
            }
        }
        else
        {
            // LogWarning("Could not parse EXIF data (Code " + std::to_string(code) + ") from " + filePath);
        }
    }

    // --- LibRaw Loading ---
    inline std::optional<AppImage> loadRaw(const std::string &filePath)
    {
        LibRaw rawProcessor;
        AppImage image;

        // Set parameters for desired output
        // Output 16-bit data
        rawProcessor.imgdata.params.output_bps = 16;
        // Disable automatic brightness adjustment (we want linear)
        rawProcessor.imgdata.params.no_auto_bright = 1;
        // Set output color space (e.g., 1 = sRGB, 3 = ProPhoto, 4 = AdobeRGB)
        // ProPhoto (3) or AdobeRGB (4) are good wide-gamut choices if editor supports them.
        // sRGB (1) is safest if unsure. We'll assume Linear sRGB for now.
        rawProcessor.imgdata.params.output_color = 1; // 1 = sRGB primaries
        // Set gamma (1.0 for linear) - use {1.0, 1.0} for linear output
        rawProcessor.imgdata.params.gamm[0] = 1.0; // Linear gamma
        rawProcessor.imgdata.params.gamm[1] = 1.0;
        // Use camera white balance if available, otherwise auto
        rawProcessor.imgdata.params.use_camera_wb = 1;
        rawProcessor.imgdata.params.use_auto_wb = (rawProcessor.imgdata.params.use_camera_wb == 0);
        // Consider other params: demosaic algorithm, highlight recovery, etc.

        int ret;
        if ((ret = rawProcessor.open_file(filePath.c_str())) != LIBRAW_SUCCESS)
        {
            LogError("LibRaw: Cannot open file " + filePath + " - " + libraw_strerror(ret));
            return std::nullopt;
        }

        if ((ret = rawProcessor.unpack()) != LIBRAW_SUCCESS)
        {
            LogError("LibRaw: Cannot unpack file " + filePath + " - " + libraw_strerror(ret));
            return std::nullopt;
        }

        // Process the image (demosaic, color conversion, etc.)
        if ((ret = rawProcessor.dcraw_process()) != LIBRAW_SUCCESS)
        {
            LogError("LibRaw: Cannot process file " + filePath + " - " + libraw_strerror(ret));
            // Try fallback processing if dcraw_process fails (might be non-RAW TIFF/JPEG)
            if (ret == LIBRAW_UNSUPPORTED_THUMBNAIL || ret == LIBRAW_REQUEST_FOR_NONEXISTENT_IMAGE)
            {
                LogWarning("LibRaw: File " + filePath + " might be non-RAW or only has thumbnail. Attempting fallback.");
                // You could try loading with libjpeg/libtiff here, but for simplicity we fail
            }
            return std::nullopt;
        }

        // Get the processed image data
        libraw_processed_image_t *processed_image = rawProcessor.dcraw_make_mem_image(&ret);
        if (!processed_image)
        {
            LogError("LibRaw: Cannot make memory image for " + filePath + " - " + libraw_strerror(ret));
            return std::nullopt;
        }

        // Copy data to AppImage format
        if (processed_image->type == LIBRAW_IMAGE_BITMAP && processed_image->bits == 16)
        {
            image.m_width = processed_image->width;
            image.m_height = processed_image->height;
            image.m_channels = processed_image->colors; // Should be 3 (RGB)
            image.m_isLinear = true;                    // We requested linear gamma

            if (image.m_channels != 3)
            {
                LogWarning("LibRaw: Expected 3 channels, got " + std::to_string(image.m_channels));
                // Handle grayscale or other cases if needed, for now assume RGB
                image.m_channels = 3;
            }

            size_t num_pixels = static_cast<size_t>(image.m_width) * image.m_height;
            size_t total_floats = num_pixels * image.m_channels;
            image.m_pixelData.resize(total_floats);

            uint16_t *raw_data = reinterpret_cast<uint16_t *>(processed_image->data);
            float *app_data = image.m_pixelData.data();

            // Convert 16-bit unsigned short [0, 65535] to float [0.0, 1.0+]
            for (size_t i = 0; i < total_floats; ++i)
            {
                app_data[i] = static_cast<float>(raw_data[i]) / 65535.0f;
            }

            // Get color space name based on output_color param
            switch (rawProcessor.imgdata.params.output_color)
            {
            case 1:
                image.m_colorSpaceName = "Linear sRGB";
                break;
            case 2:
                image.m_colorSpaceName = "Linear Adobe RGB (1998)";
                break; // Check LibRaw docs if this is correct mapping
            case 3:
                image.m_colorSpaceName = "Linear ProPhoto RGB";
                break;
            case 4:
                image.m_colorSpaceName = "Linear XYZ";
                break; // Check LibRaw docs
            default:
                image.m_colorSpaceName = "Linear Unknown";
                break;
            }

            // Extract Metadata (Example - add more fields as needed)
            image.m_metadata["LibRaw.Camera.Make"] = rawProcessor.imgdata.idata.make;
            image.m_metadata["LibRaw.Camera.Model"] = rawProcessor.imgdata.idata.model;
            image.m_metadata["LibRaw.Image.Timestamp"] = std::to_string(rawProcessor.imgdata.other.timestamp);
            image.m_metadata["LibRaw.Image.ShotOrder"] = std::to_string(rawProcessor.imgdata.other.shot_order);
            image.m_metadata["LibRaw.Photo.ExposureTime"] = std::to_string(rawProcessor.imgdata.other.shutter);
            image.m_metadata["LibRaw.Photo.Aperture"] = std::to_string(rawProcessor.imgdata.other.aperture);
            image.m_metadata["LibRaw.Photo.ISOSpeed"] = std::to_string(rawProcessor.imgdata.other.iso_speed);
            image.m_metadata["LibRaw.Photo.FocalLength"] = std::to_string(rawProcessor.imgdata.other.focal_len);
            // Copy EasyExif compatible fields if possible for consistency
            image.m_metadata["Exif.Image.Make"] = rawProcessor.imgdata.idata.make;
            image.m_metadata["Exif.Image.Model"] = rawProcessor.imgdata.idata.model;
            image.m_metadata["Exif.Photo.ExposureTime"] = std::to_string(rawProcessor.imgdata.other.shutter);
            image.m_metadata["Exif.Photo.FNumber"] = std::to_string(rawProcessor.imgdata.other.aperture); // Aperture == FNumber
            image.m_metadata["Exif.Photo.ISOSpeedRatings"] = std::to_string(rawProcessor.imgdata.other.iso_speed);
            image.m_metadata["Exif.Photo.FocalLength"] = std::to_string(rawProcessor.imgdata.other.focal_len);
            // LibRaw often provides DateTimeOriginal via timestamp
            // Convert timestamp to string if needed:
            // time_t ts = rawProcessor.imgdata.other.timestamp;
            // char buf[30];
            // strftime(buf, sizeof(buf), "%Y:%m:%d %H:%M:%S", localtime(&ts));
            // image.m_metadata["Exif.Photo.DateTimeOriginal"] = buf;

            // Extract ICC Profile
            unsigned int icc_size = 0;
            const void *icc_profile_ptr = nullptr;
            if (icc_profile_ptr && icc_size > 0)
            {
                image.m_iccProfile.resize(icc_size);
                std::memcpy(image.m_iccProfile.data(), icc_profile_ptr, icc_size);
                LogWarning("LibRaw: Successfully extracted ICC profile (" + std::to_string(icc_size) + " bytes).");
                // We could potentially parse the ICC profile name here, but it's complex.
                if (image.m_colorSpaceName == "Linear Unknown")
                    image.m_colorSpaceName = "Linear (Embedded ICC)";
            }
            else
            {
                LogWarning("LibRaw: No ICC profile found or extracted.");
            }
        }
        else
        {
            LogError("LibRaw: Processed image is not 16-bit bitmap (type=" + std::to_string(processed_image->type) + " bits=" + std::to_string(processed_image->bits) + ")");
            LibRaw::dcraw_clear_mem(processed_image);
            return std::nullopt;
        }

        // Clean up LibRaw resources
        LibRaw::dcraw_clear_mem(processed_image);
        // rawProcessor is automatically cleaned up by its destructor

        return image;
    }

    // --- libjpeg Loading ---
    // Custom error handler for libjpeg
    struct JpegErrorManager
    {
        jpeg_error_mgr pub;
        jmp_buf setjmp_buffer; // For returning control on error
    };

    void jpegErrorExit(j_common_ptr cinfo)
    {
        JpegErrorManager *myerr = reinterpret_cast<JpegErrorManager *>(cinfo->err);
        // Format the error message
        char buffer[JMSG_LENGTH_MAX];
        (*cinfo->err->format_message)(cinfo, buffer);
        LogError("libjpeg: " + std::string(buffer));
        // Return control to setjmp point
        longjmp(myerr->setjmp_buffer, 1);
    }

    inline std::optional<AppImage> loadJpeg(const std::string &filePath)
    {
        FILE *infile = fopen(filePath.c_str(), "rb");
        if (!infile)
        {
            LogError("Cannot open JPEG file: " + filePath);
            return std::nullopt;
        }

        AppImage image;
        jpeg_decompress_struct cinfo;
        JpegErrorManager jerr; // Custom error handler

        // Setup error handling
        cinfo.err = jpeg_std_error(&jerr.pub);
        jerr.pub.error_exit = jpegErrorExit;
        if (setjmp(jerr.setjmp_buffer))
        {
            // If we get here, a fatal error occurred
            jpeg_destroy_decompress(&cinfo);
            fclose(infile);
            return std::nullopt;
        }

        // Initialize decompression object
        jpeg_create_decompress(&cinfo);
        jpeg_stdio_src(&cinfo, infile);

        // Read header
        jpeg_read_header(&cinfo, TRUE);

        // Start decompressor - this guesses output parameters like color space
        // We usually get JCS_RGB for color JPEGs
        cinfo.out_color_space = JCS_RGB; // Request RGB output
        jpeg_start_decompress(&cinfo);

        image.m_width = cinfo.output_width;
        image.m_height = cinfo.output_height;
        image.m_channels = cinfo.output_components; // Should be 3 for JCS_RGB

        if (image.m_channels != 1 && image.m_channels != 3)
        {
            LogError("libjpeg: Unsupported number of channels: " + std::to_string(image.m_channels));
            jpeg_finish_decompress(&cinfo);
            jpeg_destroy_decompress(&cinfo);
            fclose(infile);
            return std::nullopt;
        }

        size_t num_pixels = static_cast<size_t>(image.m_width) * image.m_height;
        size_t total_floats = num_pixels * image.m_channels;
        image.m_pixelData.resize(total_floats);
        image.m_isLinear = true;                // We will convert to linear
        image.m_colorSpaceName = "Linear sRGB"; // Standard JPEG assumption

        // Allocate temporary buffer for one scanline
        int row_stride = cinfo.output_width * cinfo.output_components;
        std::vector<unsigned char> scanline_buffer(row_stride);
        JSAMPROW row_pointer[1];
        row_pointer[0] = scanline_buffer.data();

        float *app_data_ptr = image.m_pixelData.data();

        // Read scanlines
        while (cinfo.output_scanline < cinfo.output_height)
        {
            jpeg_read_scanlines(&cinfo, row_pointer, 1);
            // Convert scanline from 8-bit sRGB to linear float
            for (int i = 0; i < row_stride; ++i)
            {
                *app_data_ptr++ = srgb_to_linear_approx(static_cast<float>(scanline_buffer[i]) / 255.0f);
            }
        }

        // Finish decompression and clean up
        jpeg_finish_decompress(&cinfo);
        jpeg_destroy_decompress(&cinfo);
        fclose(infile);

        // Load EXIF data separately
        loadExifData(filePath, image.m_metadata);

        return image;
    }

    // --- libpng Loading ---
    // Custom error handler for libpng
    void pngErrorFunc(png_structp png_ptr, png_const_charp error_msg)
    {
        LogError("libpng: " + std::string(error_msg));
        jmp_buf *jmp_ptr = reinterpret_cast<jmp_buf *>(png_get_error_ptr(png_ptr));
        if (jmp_ptr)
        {
            longjmp(*jmp_ptr, 1);
        }
        // If no jmp_buf, just exit (shouldn't happen if setup correctly)
        exit(EXIT_FAILURE);
    }
    void pngWarningFunc(png_structp png_ptr, png_const_charp warning_msg)
    {
        LogWarning("libpng: " + std::string(warning_msg));
        // Don't longjmp on warnings
    }

    inline std::optional<AppImage> loadPng(const std::string &filePath)
    {
        FILE *fp = fopen(filePath.c_str(), "rb");
        if (!fp)
        {
            LogError("Cannot open PNG file: " + filePath);
            return std::nullopt;
        }

        // Check PNG signature
        unsigned char header[8];
        fread(header, 1, 8, fp);
        if (png_sig_cmp(header, 0, 8))
        {
            LogError("File is not a valid PNG: " + filePath);
            fclose(fp);
            return std::nullopt;
        }

        png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, nullptr, pngErrorFunc, pngWarningFunc);
        if (!png_ptr)
        {
            LogError("libpng: png_create_read_struct failed");
            fclose(fp);
            return std::nullopt;
        }

        png_infop info_ptr = png_create_info_struct(png_ptr);
        if (!info_ptr)
        {
            LogError("libpng: png_create_info_struct failed");
            png_destroy_read_struct(&png_ptr, nullptr, nullptr);
            fclose(fp);
            return std::nullopt;
        }

        // Setup jump buffer for error handling
        jmp_buf jmpbuf;
        if (setjmp(jmpbuf))
        {
            LogError("libpng: Error during read");
            png_destroy_read_struct(&png_ptr, &info_ptr, nullptr);
            fclose(fp);
            return std::nullopt;
        }
        // Assign jump buffer to png error pointer
        // Note: The cast from jmp_buf* to png_voidp* might feel odd, but it's standard practice
        png_set_error_fn(png_ptr, reinterpret_cast<png_voidp>(&jmpbuf), pngErrorFunc, pngWarningFunc);

        png_init_io(png_ptr, fp);
        png_set_sig_bytes(png_ptr, 8); // We already read the 8 signature bytes

        // Read file info
        png_read_info(png_ptr, info_ptr);

        AppImage image;
        png_uint_32 png_width, png_height;
        int bit_depth, color_type, interlace_method, compression_method, filter_method;
        png_get_IHDR(png_ptr, info_ptr, &png_width, &png_height, &bit_depth, &color_type,
                     &interlace_method, &compression_method, &filter_method);

        image.m_width = png_width;
        image.m_height = png_height;

        // --- Transformations ---
        // We want linear float RGB or RGBA output

        // Handle palette -> RGB
        if (color_type == PNG_COLOR_TYPE_PALETTE)
        {
            png_set_palette_to_rgb(png_ptr);
        }
        // Handle low bit depth grayscale -> 8 bit
        if (color_type == PNG_COLOR_TYPE_GRAY && bit_depth < 8)
        {
            png_set_expand_gray_1_2_4_to_8(png_ptr);
            bit_depth = 8; // Update bit depth after expansion
        }
        // Handle transparency chunk -> Alpha channel
        if (png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS))
        {
            png_set_tRNS_to_alpha(png_ptr);
        }
        // Convert 16-bit -> 8-bit if needed (we handle 16 bit below, so maybe don't strip)
        // if (bit_depth == 16) {
        //     png_set_strip_16(png_ptr);
        //     bit_depth = 8;
        // }
        // Convert grayscale -> RGB
        if (color_type == PNG_COLOR_TYPE_GRAY || color_type == PNG_COLOR_TYPE_GRAY_ALPHA)
        {
            png_set_gray_to_rgb(png_ptr);
        }
        // Add alpha channel if missing but requested (we might always want RGBA internally)
        // if (color_type == PNG_COLOR_TYPE_RGB || color_type == PNG_COLOR_TYPE_GRAY) {
        //     png_set_add_alpha(png_ptr, 0xFF, PNG_FILLER_AFTER); // Add opaque alpha
        // }

        // --- Gamma Handling ---
        double file_gamma = 0.0;
        bool is_srgb = (png_get_sRGB(png_ptr, info_ptr, nullptr) != 0);

        if (is_srgb)
        {
            // If sRGB chunk is present, libpng can convert to linear for us
            png_set_gamma(png_ptr, 1.0, 0.45455); // Request linear output (screen gamma 2.2)
            image.m_isLinear = true;
            image.m_colorSpaceName = "Linear sRGB";
        }
        else if (png_get_gAMA(png_ptr, info_ptr, &file_gamma))
        {
            // If gAMA chunk is present, convert to linear
            png_set_gamma(png_ptr, 1.0, file_gamma);
            image.m_isLinear = true;
            image.m_colorSpaceName = "Linear Unknown (Gamma Corrected)";
        }
        else
        {
            // No gamma info, assume sRGB and convert manually later
            image.m_isLinear = false; // Data read will be sRGB
            image.m_colorSpaceName = "sRGB (Assumed)";
        }

        // Apply transformations
        png_read_update_info(png_ptr, info_ptr);

        // Get updated info after transformations
        image.m_channels = png_get_channels(png_ptr, info_ptr);
        bit_depth = png_get_bit_depth(png_ptr, info_ptr); // Update bit_depth after transforms

        if (image.m_channels < 3)
        {
            LogWarning("libpng: Resulting image has < 3 channels after transforms. Handling as RGB.");
            // Force RGB if needed? Be careful here. For simplicity, assume RGB/RGBA works.
        }

        // Allocate memory for the image data
        size_t num_pixels = static_cast<size_t>(image.m_width) * image.m_height;
        size_t total_floats = num_pixels * image.m_channels;
        image.m_pixelData.resize(total_floats);
        float *app_data_ptr = image.m_pixelData.data();

        // Allocate row pointers
        png_bytep *row_pointers = new png_bytep[image.m_height];
        size_t row_bytes = png_get_rowbytes(png_ptr, info_ptr);
        std::vector<unsigned char> image_buffer(row_bytes * image.m_height); // Read whole image at once

        for (png_uint_32 i = 0; i < image.m_height; ++i)
        {
            row_pointers[i] = image_buffer.data() + i * row_bytes;
        }

        // Read the entire image
        png_read_image(png_ptr, row_pointers);

        // Convert the read data to linear float
        unsigned char *buffer_ptr = image_buffer.data();
        if (bit_depth == 8)
        {
            for (size_t i = 0; i < total_floats; ++i)
            {
                float val = static_cast<float>(buffer_ptr[i]) / 255.0f;
                // Convert to linear if libpng didn't do it (i.e., no sRGB/gAMA chunk found)
                app_data_ptr[i] = image.m_isLinear ? val : srgb_to_linear_approx(val);
            }
        }
        else if (bit_depth == 16)
        {
            uint16_t *buffer_ptr16 = reinterpret_cast<uint16_t *>(buffer_ptr);
            // PNG 16-bit uses network byte order (big-endian)
            bool needs_swap = (png_get_uint_16((png_bytep) "\x01\x02") != 0x0102); // Check system endianness

            for (size_t i = 0; i < total_floats; ++i)
            {
                uint16_t raw_val = buffer_ptr16[i];
                if (needs_swap)
                { // Swap bytes if system is little-endian
                    raw_val = (raw_val >> 8) | (raw_val << 8);
                }
                float val = static_cast<float>(raw_val) / 65535.0f;
                // Convert to linear if libpng didn't do it
                app_data_ptr[i] = image.m_isLinear ? val : srgb_to_linear_approx(val);
            }
        }
        else
        {
            LogError("libpng: Unsupported bit depth after transforms: " + std::to_string(bit_depth));
            delete[] row_pointers;
            png_destroy_read_struct(&png_ptr, &info_ptr, nullptr);
            fclose(fp);
            return std::nullopt;
        }

        // If we assumed sRGB and converted manually, update state
        if (!image.m_isLinear)
        {
            image.m_isLinear = true;
            image.m_colorSpaceName = "Linear sRGB (Assumed)";
        }

        // Clean up
        delete[] row_pointers;
        png_read_end(png_ptr, nullptr); // Finish reading remaining chunks
        png_destroy_read_struct(&png_ptr, &info_ptr, nullptr);
        fclose(fp);

        // Note: PNG typically doesn't store EXIF in the same way as JPEG/TIFF.
        // It can have text chunks (tEXt, zTXt, iTXt) which might hold metadata.
        // Reading these requires additional libpng calls (png_get_text). Not implemented here.

        return image;
    }

    // --- libtiff Loading ---
    // Suppress libtiff warnings/errors (optional, can be noisy)
    void tiffErrorHandler(const char *module, const char *fmt, va_list ap) { /* Do nothing */ }
    void tiffWarningHandler(const char *module, const char *fmt, va_list ap) { /* Do nothing */ }

    inline std::optional<AppImage> loadTiff(const std::string &filePath)
    {
        // Set custom handlers to suppress console output from libtiff
        // TIFFSetErrorHandler(tiffErrorHandler);
        // TIFFSetWarningHandler(tiffWarningHandler);

        TIFF *tif = TIFFOpen(filePath.c_str(), "r");
        if (!tif)
        {
            LogError("Cannot open TIFF file: " + filePath);
            return std::nullopt;
        }

        AppImage image;
        uint32_t w, h;
        uint16_t bitsPerSample, samplesPerPixel, photometric, planarConfig;

        TIFFGetFieldDefaulted(tif, TIFFTAG_IMAGEWIDTH, &w);
        TIFFGetFieldDefaulted(tif, TIFFTAG_IMAGELENGTH, &h);
        TIFFGetFieldDefaulted(tif, TIFFTAG_BITSPERSAMPLE, &bitsPerSample);
        TIFFGetFieldDefaulted(tif, TIFFTAG_SAMPLESPERPIXEL, &samplesPerPixel);
        TIFFGetFieldDefaulted(tif, TIFFTAG_PHOTOMETRIC, &photometric);
        TIFFGetFieldDefaulted(tif, TIFFTAG_PLANARCONFIG, &planarConfig);

        image.m_width = w;
        image.m_height = h;
        image.m_channels = samplesPerPixel; // Usually 1 (Gray) or 3 (RGB) or 4 (RGBA)

        // --- Sanity Checks ---
        if (w == 0 || h == 0 || samplesPerPixel == 0)
        {
            LogError("libtiff: Invalid dimensions or samples per pixel.");
            TIFFClose(tif);
            return std::nullopt;
        }
        if (bitsPerSample != 8 && bitsPerSample != 16 && bitsPerSample != 32)
        {
            // Note: 32-bit float TIFFs exist but require different handling
            LogError("libtiff: Unsupported bits per sample: " + std::to_string(bitsPerSample) + ". Only 8/16 supported currently.");
            TIFFClose(tif);
            return std::nullopt;
        }
        if (photometric != PHOTOMETRIC_MINISBLACK && photometric != PHOTOMETRIC_MINISWHITE &&
            photometric != PHOTOMETRIC_RGB && photometric != PHOTOMETRIC_PALETTE &&
            photometric != PHOTOMETRIC_MASK && photometric != PHOTOMETRIC_SEPARATED /*CMYK?*/ &&
            photometric != PHOTOMETRIC_LOGL && photometric != PHOTOMETRIC_LOGLUV)
        {
            LogWarning("libtiff: Unhandled photometric interpretation: " + std::to_string(photometric));
            // We will try to read as RGB/Gray anyway... might be wrong.
        }

        // --- Data Reading ---
        // Use TIFFReadRGBAImage for simplicity - converts many formats to RGBA uint32 internally
        // Advantage: Handles various photometric interpretations, planar configs, palettes etc.
        // Disadvantage: Always gives 8-bit RGBA, loses 16-bit precision. Less control.

        // Alternative: Read scanlines manually (more complex, preserves bit depth)
        // Let's try the manual scanline approach to preserve bit depth

        size_t num_pixels = static_cast<size_t>(w) * h;
        size_t total_values = num_pixels * samplesPerPixel; // Total uint8/uint16 values
        image.m_pixelData.resize(total_values);             // Resize for float output
        image.m_isLinear = true;                            // Assume linear, correct later if gamma info found
        image.m_colorSpaceName = "Linear Unknown (TIFF)";   // Default assumption

        tmsize_t scanline_size = TIFFScanlineSize(tif);
        std::vector<unsigned char> scanline_buffer(scanline_size);

        float *app_data_ptr = image.m_pixelData.data();
        float max_val = (bitsPerSample == 8) ? 255.0f : 65535.0f; // Normalization factor

        if (planarConfig == PLANARCONFIG_CONTIG)
        {
            for (uint32_t row = 0; row < h; ++row)
            {
                if (TIFFReadScanline(tif, scanline_buffer.data(), row) < 0)
                {
                    LogError("libtiff: Error reading scanline " + std::to_string(row));
                    TIFFClose(tif);
                    return std::nullopt;
                }
                // Process the contiguous scanline
                if (bitsPerSample == 8)
                {
                    unsigned char *buf_ptr = scanline_buffer.data();
                    for (size_t i = 0; i < w * samplesPerPixel; ++i)
                    {
                        *app_data_ptr++ = static_cast<float>(buf_ptr[i]) / max_val;
                    }
                }
                else
                { // bitsPerSample == 16
                    uint16_t *buf_ptr = reinterpret_cast<uint16_t *>(scanline_buffer.data());
                    for (size_t i = 0; i < w * samplesPerPixel; ++i)
                    {
                        *app_data_ptr++ = static_cast<float>(buf_ptr[i]) / max_val;
                    }
                }
            }
        }
        else if (planarConfig == PLANARCONFIG_SEPARATE)
        {
            // Read plane by plane - more complex, needs buffer per plane
            LogWarning("libtiff: Planar configuration PLANARCONFIG_SEPARATE reading not fully implemented, data might be incorrect.");
            // Basic attempt: Read all scanlines for each plane sequentially into the final buffer
            size_t plane_stride = w * h;
            for (uint16_t plane = 0; plane < samplesPerPixel; ++plane)
            {
                float *plane_start_ptr = image.m_pixelData.data() + plane; // Start at the channel offset
                for (uint32_t row = 0; row < h; ++row)
                {
                    if (TIFFReadScanline(tif, scanline_buffer.data(), row, plane) < 0)
                    {
                        LogError("libtiff: Error reading scanline " + std::to_string(row) + " plane " + std::to_string(plane));
                        TIFFClose(tif);
                        return std::nullopt;
                    }
                    // Process the separate scanline for this plane
                    if (bitsPerSample == 8)
                    {
                        unsigned char *buf_ptr = scanline_buffer.data();
                        float *current_pixel_in_plane = plane_start_ptr + row * w * samplesPerPixel;
                        for (uint32_t col = 0; col < w; ++col)
                        {
                            *current_pixel_in_plane = static_cast<float>(buf_ptr[col]) / max_val;
                            current_pixel_in_plane += samplesPerPixel; // Jump to next pixel's spot for this channel
                        }
                    }
                    else
                    { // 16 bit
                        uint16_t *buf_ptr = reinterpret_cast<uint16_t *>(scanline_buffer.data());
                        float *current_pixel_in_plane = plane_start_ptr + row * w * samplesPerPixel;
                        for (uint32_t col = 0; col < w; ++col)
                        {
                            *current_pixel_in_plane = static_cast<float>(buf_ptr[col]) / max_val;
                            current_pixel_in_plane += samplesPerPixel;
                        }
                    }
                }
            }
        }
        else
        {
            LogError("libtiff: Unknown planar configuration: " + std::to_string(planarConfig));
            TIFFClose(tif);
            return std::nullopt;
        }

        // --- Post-processing based on Photometric interpretation ---
        // Handle grayscale inversion
        if (photometric == PHOTOMETRIC_MINISWHITE)
        {
            LogWarning("libtiff: Inverting MINISWHITE image.");
            for (float &val : image.m_pixelData)
            {
                val = 1.0f - val; // Simple inversion
            }
        }

        // TODO: Handle Palette -> RGB (needs reading the colormap tag)
        if (photometric == PHOTOMETRIC_PALETTE)
        {
            LogWarning("libtiff: PHOTOMETRIC_PALETTE not fully handled. Image loaded as indexed.");
            // Requires reading TIFFTAG_COLORMAP and expanding pixels
        }

        // TODO: Check for gamma tags or ICC profile tag
        // uint16_t* icc_profile_count = nullptr;
        // void* icc_profile_data = nullptr;
        // if (TIFFGetField(tif, TIFFTAG_ICCPROFILE, &icc_profile_count, &icc_profile_data) && icc_profile_count && icc_profile_data) {
        //      image.m_iccProfile.resize(*icc_profile_count);
        //      std::memcpy(image.m_iccProfile.data(), icc_profile_data, *icc_profile_count);
        //      image.m_colorSpaceName = "Linear (Embedded ICC)"; // Or just "(Embedded ICC)"
        // } else {
        //     // Check for gamma? Not standard. Assume sRGB/linear for now.
        // }

        // If no specific color info found, assume sRGB and convert to linear
        // For TIFF, it's often safer to assume linear if 16-bit, sRGB if 8-bit without other info.
        if (bitsPerSample == 8)
        {
            LogWarning("libtiff: Assuming 8-bit TIFF is sRGB. Converting to linear.");
            for (float &val : image.m_pixelData)
            {
                val = srgb_to_linear_approx(val);
            }
            image.m_isLinear = true;
            image.m_colorSpaceName = "Linear sRGB (Assumed)";
        }
        else
        {
            LogWarning("libtiff: Assuming 16-bit TIFF is already linear.");
            image.m_isLinear = true;
            image.m_colorSpaceName = "Linear Unknown (TIFF)";
        }

        TIFFClose(tif);

        // Try loading EXIF using LibTiff directory reading or Exiv2 (not EasyExif)
        // This basic example doesn't load EXIF from TIFFs.
        // You could use Exiv2 here if integrated.
        LogWarning("EXIF loading from TIFF not implemented in this example.");

        return image;
    }

} // namespace AppImageUtil

// --- AppImage Constructor Implementation ---
AppImage::AppImage(uint32_t width, uint32_t height, uint32_t channels)
    : m_width(width), m_height(height), m_channels(channels), m_isLinear(true)
{
    if (width > 0 && height > 0 && channels > 0)
    {
        try
        {
            m_pixelData.resize(static_cast<size_t>(width) * height * channels);
        }
        catch (const std::bad_alloc &e)
        {
            AppImageUtil::LogError("Failed to allocate memory for image: " + std::string(e.what()));
            clear_image(); // Reset to empty state
            throw;         // Re-throw exception
        }
    }
    // Default assumption is linear data in our internal format
    m_colorSpaceName = "Linear Generic";
}

void AppImage::resize(uint32_t newWidth, uint32_t newHeight, uint32_t newChannels)
{
    if (newChannels == 0)
        newChannels = m_channels;
    if (newChannels == 0)
        newChannels = 3; // Default if was empty

    m_width = newWidth;
    m_height = newHeight;
    m_channels = newChannels;

    if (newWidth == 0 || newHeight == 0 || newChannels == 0)
    {
        m_pixelData.clear();
        // Keep metadata? Optional.
    }
    else
    {
        try
        {
            m_pixelData.resize(static_cast<size_t>(newWidth) * newHeight * newChannels);
            // Note: Resizing doesn't preserve pixel content intelligently.
            // Consider adding different resize modes (clear, copy existing, etc.)
        }
        catch (const std::bad_alloc &e)
        {
            AppImageUtil::LogError("Failed to allocate memory during resize: " + std::string(e.what()));
            clear_image();
            throw;
        }
    }
}

void AppImage::clear_image()
{
    m_width = 0;
    m_height = 0;
    m_channels = 0;
    m_pixelData.clear();
    m_metadata.clear();
    m_iccProfile.clear();
    m_colorSpaceName = "Unknown";
    m_isLinear = true;
}

// --- loadImage Implementation ---
std::optional<AppImage> loadImage(const std::string &filePath)
{
    using namespace AppImageUtil;

    DetectedFileType type = detectFileType(filePath);

    try
    {
        switch (type)
        {
        case DetectedFileType::RAW:
            LogWarning("Detected type: RAW (using LibRaw)");
            return loadRaw(filePath);
        case DetectedFileType::JPEG:
            LogWarning("Detected type: JPEG (using libjpeg)");
            return loadJpeg(filePath);
        case DetectedFileType::PNG:
            LogWarning("Detected type: PNG (using libpng)");
            return loadPng(filePath);
        case DetectedFileType::TIFF:
            LogWarning("Detected type: TIFF (using libtiff)");
            // LibRaw can sometimes open TIFFs that contain RAW data. Try it first?
            // For now, directly use libtiff.
            return loadTiff(filePath);
        case DetectedFileType::UNKNOWN:
        default:
            LogError("Unknown or unsupported file type: " + filePath);
            return std::nullopt;
        }
    }
    catch (const std::exception &e)
    {
        LogError("Exception caught during image loading: " + std::string(e.what()));
        return std::nullopt;
    }
    catch (...)
    {
        LogError("Unknown exception caught during image loading.");
        return std::nullopt;
    }
}

// --- saveImage Implementation ---

namespace AppImageUtil
{

    // --- libjpeg Saving ---
    inline bool saveJpeg(const AppImage &image, const std::string &filePath, int quality)
    {
        if (image.getChannels() != 1 && image.getChannels() != 3)
        {
            LogError("libjpeg save: Can only save 1 (Grayscale) or 3 (RGB) channels. Image has " + std::to_string(image.getChannels()));
            return false;
        }

        FILE *outfile = fopen(filePath.c_str(), "wb");
        if (!outfile)
        {
            LogError("Cannot open file for JPEG writing: " + filePath);
            return false;
        }

        jpeg_compress_struct cinfo;
        JpegErrorManager jerr; // Use the same error manager as loading

        // Setup error handling
        cinfo.err = jpeg_std_error(&jerr.pub);
        jerr.pub.error_exit = jpegErrorExit; // Use the same exit function
        if (setjmp(jerr.setjmp_buffer))
        {
            // Error occurred during compression
            jpeg_destroy_compress(&cinfo);
            fclose(outfile);
            return false;
        }

        // Initialize compression object
        jpeg_create_compress(&cinfo);
        jpeg_stdio_dest(&cinfo, outfile);

        // Set parameters
        cinfo.image_width = image.getWidth();
        cinfo.image_height = image.getHeight();
        cinfo.input_components = image.getChannels();
        cinfo.in_color_space = (image.getChannels() == 1) ? JCS_GRAYSCALE : JCS_RGB;

        jpeg_set_defaults(&cinfo);
        jpeg_set_quality(&cinfo, std::max(1, std::min(100, quality)), TRUE /* limit to baseline-JPEG */);
        // Could set density, comments, etc. here if needed using jpeg_set_... functions

        // Start compressor
        jpeg_start_compress(&cinfo, TRUE);

        // Prepare 8-bit sRGB scanline buffer
        int row_stride = cinfo.image_width * cinfo.input_components;
        std::vector<unsigned char> scanline_buffer(row_stride);
        const float *app_data = image.getData();

        // Process scanlines
        while (cinfo.next_scanline < cinfo.image_height)
        {
            unsigned char *buffer_ptr = scanline_buffer.data();
            size_t row_start_index = static_cast<size_t>(cinfo.next_scanline) * cinfo.image_width * cinfo.input_components;

            // Convert one row from linear float to 8-bit sRGB uchar
            for (int i = 0; i < row_stride; ++i)
            {
                float linear_val = app_data[row_start_index + i];
                float srgb_val = linear_to_srgb_approx(linear_val);
                int int_val = static_cast<int>(std::round(srgb_val * 255.0f));
                buffer_ptr[i] = static_cast<unsigned char>(std::max(0, std::min(255, int_val)));
            }

            JSAMPROW row_pointer[1];
            row_pointer[0] = scanline_buffer.data();
            jpeg_write_scanlines(&cinfo, row_pointer, 1);
        }

        // Finish compression and clean up
        jpeg_finish_compress(&cinfo);
        jpeg_destroy_compress(&cinfo);
        fclose(outfile);

        // --- Metadata Saving ---
        LogWarning("JPEG EXIF/ICC Metadata saving is NOT implemented.");
        // Saving metadata would typically involve:
        // 1. Using Exiv2 library.
        // 2. Opening the file *after* libjpeg saves the pixels.
        // 3. Writing the metadata from image.m_metadata and image.m_iccProfile into the file structure.

        return true;
    }

    // --- libpng Saving ---
    inline bool savePng(const AppImage &image, const std::string &filePath, int bit_depth_out)
    {
        if (bit_depth_out != 8 && bit_depth_out != 16)
        {
            LogError("libpng save: Only 8 or 16 bit output supported.");
            return false;
        }
        if (image.getChannels() < 1 || image.getChannels() > 4 || image.getChannels() == 2)
        {
            LogError("libpng save: Can only save 1 (Gray), 3 (RGB), or 4 (RGBA) channels. Image has " + std::to_string(image.getChannels()));
            return false;
        }

        FILE *fp = fopen(filePath.c_str(), "wb");
        if (!fp)
        {
            LogError("Cannot open file for PNG writing: " + filePath);
            return false;
        }

        png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, nullptr, pngErrorFunc, pngWarningFunc);
        if (!png_ptr)
        {
            LogError("libpng: png_create_write_struct failed");
            fclose(fp);
            return false;
        }

        png_infop info_ptr = png_create_info_struct(png_ptr);
        if (!info_ptr)
        {
            LogError("libpng: png_create_info_struct failed");
            png_destroy_write_struct(&png_ptr, nullptr);
            fclose(fp);
            return false;
        }

        // Setup jump buffer for error handling
        jmp_buf jmpbuf;
        if (setjmp(jmpbuf))
        {
            LogError("libpng: Error during write");
            png_destroy_write_struct(&png_ptr, &info_ptr);
            fclose(fp);
            return false;
        }
        png_set_error_fn(png_ptr, reinterpret_cast<png_voidp>(&jmpbuf), pngErrorFunc, pngWarningFunc);

        png_init_io(png_ptr, fp);

        // Determine PNG color type
        int color_type;
        switch (image.getChannels())
        {
        case 1:
            color_type = PNG_COLOR_TYPE_GRAY;
            break;
        case 3:
            color_type = PNG_COLOR_TYPE_RGB;
            break;
        case 4:
            color_type = PNG_COLOR_TYPE_RGB_ALPHA;
            break;
        default: /* Should have been caught earlier */
            return false;
        }

        // Set IHDR chunk
        png_set_IHDR(png_ptr, info_ptr, image.getWidth(), image.getHeight(),
                     bit_depth_out, color_type,
                     PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);

        // Set Gamma/sRGB info
        bool save_as_srgb = (bit_depth_out == 8); // Convention: Save 8-bit as sRGB, 16-bit as linear
        if (save_as_srgb)
        {
            png_set_sRGB_gAMA_and_cHRM(png_ptr, info_ptr, PNG_sRGB_INTENT_PERCEPTUAL);
            LogWarning("libpng save: Saving 8-bit PNG with sRGB chunk.");
        }
        else
        {                                         // 16-bit linear
            png_set_gAMA(png_ptr, info_ptr, 1.0); // Explicitly linear gamma
            LogWarning("libpng save: Saving 16-bit PNG with gamma 1.0 (linear).");
        }

        // Write header info
        png_write_info(png_ptr, info_ptr);

        // --- Prepare Data ---
        std::vector<png_bytep> row_pointers(image.getHeight());
        size_t values_per_row = static_cast<size_t>(image.getWidth()) * image.getChannels();
        size_t bytes_per_value = (bit_depth_out == 8) ? 1 : 2;
        size_t row_bytes = values_per_row * bytes_per_value;
        std::vector<unsigned char> output_buffer(row_bytes * image.getHeight());

        const float *app_data = image.getData();
        bool needs_swap = (bit_depth_out == 16 && (png_get_uint_16((png_bytep) "\x01\x02") != 0x0102)); // Check endianness only for 16-bit

        // Convert internal float data to target format row by row
        for (uint32_t y = 0; y < image.getHeight(); ++y)
        {
            unsigned char *row_buf_ptr = output_buffer.data() + y * row_bytes;
            row_pointers[y] = row_buf_ptr;
            size_t row_start_index = static_cast<size_t>(y) * values_per_row;

            if (bit_depth_out == 8)
            {
                unsigned char *uchar_ptr = row_buf_ptr;
                for (size_t i = 0; i < values_per_row; ++i)
                {
                    float linear_val = app_data[row_start_index + i];
                    float srgb_val = linear_to_srgb_approx(linear_val); // Convert to sRGB for 8-bit output
                    int int_val = static_cast<int>(std::round(srgb_val * 255.0f));
                    uchar_ptr[i] = static_cast<unsigned char>(std::max(0, std::min(255, int_val)));
                }
            }
            else
            { // 16-bit
                uint16_t *ushort_ptr = reinterpret_cast<uint16_t *>(row_buf_ptr);
                for (size_t i = 0; i < values_per_row; ++i)
                {
                    float linear_val = app_data[row_start_index + i];
                    // Clamp linear value before scaling for 16-bit output (0.0 to 1.0 range typical for linear PNG)
                    linear_val = std::fmax(0.0f, std::fmin(1.0f, linear_val));
                    int int_val = static_cast<int>(std::round(linear_val * 65535.0f));
                    uint16_t val16 = static_cast<uint16_t>(std::max(0, std::min(65535, int_val)));

                    if (needs_swap)
                    { // Swap bytes for big-endian PNG format
                        ushort_ptr[i] = (val16 >> 8) | (val16 << 8);
                    }
                    else
                    {
                        ushort_ptr[i] = val16;
                    }
                }
            }
        }

        // Write image data
        png_write_image(png_ptr, row_pointers.data());

        // End writing
        png_write_end(png_ptr, nullptr);

        // Clean up
        png_destroy_write_struct(&png_ptr, &info_ptr);
        fclose(fp);

        LogWarning("PNG Metadata saving (text chunks, ICC) is NOT implemented.");

        return true;
    }

    // --- libtiff Saving ---
    inline bool saveTiff(const AppImage &image, const std::string &filePath, int bit_depth_out)
    {
        if (bit_depth_out != 8 && bit_depth_out != 16)
        {
            LogError("libtiff save: Only 8 or 16 bit output supported.");
            return false;
        }
        if (image.getChannels() < 1 || image.getChannels() > 4 || image.getChannels() == 2)
        {
            LogError("libtiff save: Can only save 1 (Gray), 3 (RGB), or 4 (RGBA) channels. Image has " + std::to_string(image.getChannels()));
            return false;
        }

        TIFF *tif = TIFFOpen(filePath.c_str(), "w");
        if (!tif)
        {
            LogError("Cannot open file for TIFF writing: " + filePath);
            return false;
        }

        // --- Set Core TIFF Tags ---
        TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, image.getWidth());
        TIFFSetField(tif, TIFFTAG_IMAGELENGTH, image.getHeight());
        TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, static_cast<uint16_t>(image.getChannels()));
        TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, static_cast<uint16_t>(bit_depth_out));
        TIFFSetField(tif, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
        TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG); // Interleaved is simpler

        // Set Photometric Interpretation
        uint16_t photometric;
        if (image.getChannels() == 1)
        {
            photometric = PHOTOMETRIC_MINISBLACK; // Grayscale
        }
        else if (image.getChannels() >= 3)
        {
            photometric = PHOTOMETRIC_RGB; // RGB or RGBA
            if (image.getChannels() == 4)
            {
                // Need to specify that the extra channel is Alpha
                uint16_t extra_samples = 1;
                uint16_t sample_info[] = {EXTRASAMPLE_ASSOCALPHA}; // Associated alpha
                TIFFSetField(tif, TIFFTAG_EXTRASAMPLES, extra_samples, sample_info);
            }
        }
        else
        {
            LogError("libtiff save: Unexpected channel count: " + std::to_string(image.getChannels()));
            TIFFClose(tif);
            return false;
        }
        TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, photometric);

        // Compression (optional, default is none)
        TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
        // Examples: COMPRESSION_LZW, COMPRESSION_ADOBE_DEFLATE

        // Rows per strip (can affect performance/compatibility)
        // A sensible default is often related to scanline buffer size.
        TIFFSetField(tif, TIFFTAG_ROWSPERSTRIP, TIFFDefaultStripSize(tif, (uint32_t)-1));

        // Software Tag (optional)
        TIFFSetField(tif, TIFFTAG_SOFTWARE, "AppImage Saver");

        // --- Prepare and Write Data ---
        size_t values_per_row = static_cast<size_t>(image.getWidth()) * image.getChannels();
        size_t bytes_per_value = (bit_depth_out == 8) ? 1 : 2;
        tmsize_t row_bytes = values_per_row * bytes_per_value;
        std::vector<unsigned char> output_buffer(row_bytes); // Buffer for one row

        const float *app_data = image.getData();
        bool save_as_srgb = (bit_depth_out == 8); // Convention: 8-bit=sRGB, 16-bit=Linear

        for (uint32_t y = 0; y < image.getHeight(); ++y)
        {
            unsigned char *row_buf_ptr = output_buffer.data();
            size_t row_start_index = static_cast<size_t>(y) * values_per_row;

            if (bit_depth_out == 8)
            {
                unsigned char *uchar_ptr = row_buf_ptr;
                for (size_t i = 0; i < values_per_row; ++i)
                {
                    float linear_val = app_data[row_start_index + i];
                    float srgb_val = linear_to_srgb_approx(linear_val); // Convert to sRGB
                    int int_val = static_cast<int>(std::round(srgb_val * 255.0f));
                    uchar_ptr[i] = static_cast<unsigned char>(std::max(0, std::min(255, int_val)));
                }
            }
            else
            { // 16-bit
                uint16_t *ushort_ptr = reinterpret_cast<uint16_t *>(row_buf_ptr);
                for (size_t i = 0; i < values_per_row; ++i)
                {
                    float linear_val = app_data[row_start_index + i];
                    // Clamp linear [0,1] before scaling
                    linear_val = std::fmax(0.0f, std::fmin(1.0f, linear_val));
                    int int_val = static_cast<int>(std::round(linear_val * 65535.0f));
                    ushort_ptr[i] = static_cast<uint16_t>(std::max(0, std::min(65535, int_val)));
                    // Note: TIFF uses native byte order by default, no swapping needed usually.
                }
            }

            // Write the scanline
            if (TIFFWriteScanline(tif, row_buf_ptr, y, 0) < 0)
            {
                LogError("libtiff save: Error writing scanline " + std::to_string(y));
                TIFFClose(tif);
                return false;
            }
        }

        // Clean up
        TIFFClose(tif);

        LogWarning("TIFF EXIF/ICC Metadata saving is NOT implemented.");
        // Saving metadata requires:
        // 1. Using Exiv2 or LibTiff's directory writing functions *before* closing the file.
        // 2. For ICC: TIFFSetField(tif, TIFFTAG_ICCPROFILE, count, data_ptr);

        return true;
    }

} // namespace AppImageUtil

namespace ImGuiImageViewerUtil {
    // Linear float [0,1+] -> sRGB approx [0,1]
    inline float linear_to_srgb_approx(float linearVal) {
        if (linearVal <= 0.0f) return 0.0f;
        linearVal = std::fmax(0.0f, std::fmin(1.0f, linearVal)); // Clamp for display
        if (linearVal <= 0.0031308f) { return linearVal * 12.92f; }
        else { return 1.055f * std::pow(linearVal, 1.0f / 2.4f) - 0.055f; }
    }

    // Round float to nearest integer
    inline float Round(float f) { return ImFloor(f + 0.5f); }
} // namespace ImGuiImageViewerUtil


bool loadImageTexture(AppImage &appImage)
{
    if (appImage.isEmpty() || appImage.getWidth() == 0 || appImage.getHeight() == 0)
    {
        AppImageUtil::LogError("loadImageTexture: Image is empty.");
        return false;
    }
    if (!appImage.isLinear())
    {
        // This shouldn't happen if loadImage converts correctly, but good practice to check.
        AppImageUtil::LogWarning("loadImageTexture: Warning - Image data is not linear. Pipeline expects linear input.");
        // Ideally, convert to linear here if not already done. For now, proceed with caution.
    }

    const int width = static_cast<int>(appImage.getWidth());
    const int height = static_cast<int>(appImage.getHeight());
    const int channels = static_cast<int>(appImage.getChannels());
    const float *linearData = appImage.getData();
    size_t numFloats = static_cast<size_t>(width) * height * channels;

    if (!linearData || numFloats == 0) {
         AppImageUtil::LogError("loadImageTexture: Image data pointer is null or size is zero.");
         return false;
    }

    // --- Determine OpenGL texture format ---
    GLenum internalFormat;
    GLenum dataFormat;
    GLenum dataType = GL_FLOAT;
    std::vector<float> textureDataBuffer; // Temporary buffer if we need to convert format (e.g., RGB -> RGBA)

    const float* dataPtr = linearData;

    if (channels == 1) {
        internalFormat = GL_R16F; // Single channel, 16-bit float
        dataFormat = GL_RED;
        // Expand Grayscale to RGBA for easier shader handling (optional, shaders could handle GL_RED)
        // Example: Expand to RGBA float buffer
        textureDataBuffer.resize(static_cast<size_t>(width) * height * 4);
        float* outPtr = textureDataBuffer.data();
        for(int i = 0; i < width * height; ++i) {
             float val = linearData[i];
             *outPtr++ = val;
             *outPtr++ = val;
             *outPtr++ = val;
             *outPtr++ = 1.0f; // Alpha
        }
        internalFormat = GL_RGBA16F; // Use RGBA16F if expanding
        dataFormat = GL_RGBA;
        dataPtr = textureDataBuffer.data(); // Point to the new buffer
        AppImageUtil::LogWarning("loadImageTexture: Expanding 1-channel to RGBA16F for texture.");

    } else if (channels == 3) {
        internalFormat = GL_RGBA16F; // Store as RGBA, easier for FBOs/blending
        dataFormat = GL_RGBA;
        // Need to convert RGB float -> RGBA float
        textureDataBuffer.resize(static_cast<size_t>(width) * height * 4);
        float* outPtr = textureDataBuffer.data();
        const float* inPtr = linearData;
        for(int i = 0; i < width * height; ++i) {
            *outPtr++ = *inPtr++; // R
            *outPtr++ = *inPtr++; // G
            *outPtr++ = *inPtr++; // B
            *outPtr++ = 1.0f;     // A
        }
        dataPtr = textureDataBuffer.data(); // Point to the new buffer
        AppImageUtil::LogWarning("loadImageTexture: Expanding 3-channel RGB to RGBA16F for texture.");
    } else if (channels == 4) {
        internalFormat = GL_RGBA16F; // Native RGBA
        dataFormat = GL_RGBA;
        dataPtr = linearData; // Use original data directly
    } else {
        AppImageUtil::LogError("loadImageTexture: Unsupported number of channels: " + std::to_string(channels));
        return false;
    }

    // --- Upload to OpenGL Texture ---
    GLint lastTexture;
    glGetIntegerv(GL_TEXTURE_BINDING_2D, &lastTexture);

    if (appImage.m_textureId == 0) {
        glGenTextures(1, &appImage.m_textureId);
         AppImageUtil::LogWarning("loadImageTexture: Generated new texture ID: " + std::to_string(appImage.m_textureId));
    } else {
         AppImageUtil::LogWarning("loadImageTexture: Reusing texture ID: " + std::to_string(appImage.m_textureId));
    }


    glBindTexture(GL_TEXTURE_2D, appImage.m_textureId);
    // Use GL_LINEAR for smoother results when zooming/scaling in the viewer, even if processing is nearest neighbor.
    // The processing pipeline itself uses FBOs, textures don't need mipmaps typically.
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

    glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // Ensure correct alignment, especially for RGB data
    glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); // Data is contiguous

    // Check if texture dimensions/format need updating
    bool needsTexImage = true;
    if (appImage.m_textureWidth == width && appImage.m_textureHeight == height) {
         // Could potentially use glTexSubImage2D if format matches, but glTexImage2D is safer
         // if the internal format might change or if it's the first load.
         // For simplicity, we'll just recreate with glTexImage2D.
         AppImageUtil::LogWarning("loadImageTexture: Texture dimensions match, overwriting with glTexImage2D.");
    } else {
        AppImageUtil::LogWarning("loadImageTexture: Texture dimensions or format mismatch, recreating with glTexImage2D.");
    }


    glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, width, height, 0, dataFormat, dataType, dataPtr);
    GLenum err = glGetError();
     if (err != GL_NO_ERROR) {
        AppImageUtil::LogError("loadImageTexture: OpenGL Error after glTexImage2D: " + std::to_string(err));
        glBindTexture(GL_TEXTURE_2D, lastTexture); // Restore previous binding
        // Consider deleting the texture ID if creation failed badly?
        if (appImage.m_textureId != 0) {
            glDeleteTextures(1, &appImage.m_textureId);
            appImage.m_textureId = 0;
        }
        return false;
    } else {
         AppImageUtil::LogWarning("loadImageTexture: glTexImage2D successful.");
    }


    // Optional: Generate mipmaps if you want smoother downscaling *in the final view*
    // glGenerateMipmap(GL_TEXTURE_2D);
    // glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);


    glBindTexture(GL_TEXTURE_2D, lastTexture); // Restore previous binding

    appImage.m_textureWidth = width;
    appImage.m_textureHeight = height;

    AppImageUtil::LogWarning("loadImageTexture: Successfully loaded linear data into texture ID " + std::to_string(appImage.m_textureId));

    return true;
}

// --- saveImage Implementation ---
bool saveImage(const AppImage &image,
               const std::string &filePath,
               ImageSaveFormat format,
               int quality)
{
    using namespace AppImageUtil;

    if (image.isEmpty())
    {
        LogError("Cannot save an empty image.");
        return false;
    }

    // Ensure internal data is linear before saving (or handle conversion if needed)
    if (!image.isLinear())
    {
        LogWarning("Attempting to save non-linear internal data. Results may be incorrect if conversion to target space isn't handled properly.");
        // Ideally, convert to linear here if required by the saving functions.
        // For this implementation, we assume the saving functions expect linear input
        // and perform the linear -> target space conversion (e.g., linear -> sRGB).
    }

    try
    {
        switch (format)
        {
        case ImageSaveFormat::JPEG:
            return saveJpeg(image, filePath, quality);
        case ImageSaveFormat::PNG_8:
            return savePng(image, filePath, 8);
        case ImageSaveFormat::PNG_16:
            return savePng(image, filePath, 16);
        case ImageSaveFormat::TIFF_8:
            return saveTiff(image, filePath, 8);
        case ImageSaveFormat::TIFF_16:
            return saveTiff(image, filePath, 16);
        case ImageSaveFormat::UNKNOWN:
        default:
            LogError("Unknown or unsupported save format specified.");
            return false;
        }
    }
    catch (const std::exception &e)
    {
        LogError("Exception caught during image saving: " + std::string(e.what()));
        return false;
    }
    catch (...)
    {
        LogError("Unknown exception caught during image saving.");
        return false;
    }
}

#endif // APP_IMAGE_IMPLEMENTATION

#endif // APP_IMAGE_H