# ReaLens / app.py
import gradio as gr
import torch
import os
import tempfile
import shutil
from PIL import Image
import numpy as np
from pathlib import Path
import sys
import copy
# --- Import logic from your project ---
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
try:
from best_ldr import compute_metrics_for_images, score_records
except ImportError:
raise ImportError("Could not import from best_ldr.py. Make sure the file is in the same directory as app.py.")
print("--- Initializing LDR-to-HDR Model (this may take a moment) ---")
# --- Global Setup: Load the CycleGAN model once when the app starts ---
# We need to satisfy the parser's requirement for a dataroot at startup
if '--dataroot' not in sys.argv:
sys.argv.extend(['--dataroot', './dummy_dataroot_for_init'])
# Load the base options
opt = TestOptions().parse()
# Manually override settings for our model
opt.name = 'ldr2hdr_cyclegan_728'
opt.model = 'test'
opt.netG = 'resnet_9blocks'
opt.norm = 'instance'
opt.no_dropout = True
opt.checkpoints_dir = './checkpoints'
opt.gpu_ids = [0] if torch.cuda.is_available() else []
opt.device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')
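# Note: the netG / norm / no_dropout overrides above are assumed to match how the
# 'ldr2hdr_cyclegan_728' checkpoint was trained; if they diverge from the training
# configuration, the saved generator weights will not load cleanly.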
# Create the model using these options
model = create_model(opt)
model.setup(opt)
model.eval()
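# eval() switches the generator to inference mode, which disables dropout and any
# train-time behaviour in the norm layers.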
print("--- Model Loaded Successfully ---")
# --- The Main Gradio Processing Function ---
def process_images_to_hdr(list_of_temp_files):
"""
The main workflow: select best LDR, run inference, and return results for the UI.
"""
if not list_of_temp_files:
raise gr.Error("Please upload your bracketed LDR images.")
if len(list_of_temp_files) < 2:
gr.Warning("For best results, upload at least 2 bracketed LDR images.")
    uploaded_filepaths = [Path(f) if isinstance(f, (str, Path)) else Path(f.name) for f in list_of_temp_files]  # newer Gradio passes plain paths, older versions pass temp-file objects with .name
try:
# --- Step 1: Select the Best LDR ---
print(f"Analyzing {len(uploaded_filepaths)} uploaded images...")
weights = {"clipped": 0.35, "coverage": 0.25, "exposure": 0.15, "sharpness": 0.15, "noise": 0.10}
records = compute_metrics_for_images(uploaded_filepaths, resize_max=1024)
scored_records = score_records(records, weights)
if not scored_records:
raise gr.Error("Could not read or score any of the uploaded images.")
best_ldr_record = scored_records[0]
best_ldr_path = best_ldr_record['path']
print(f"Best LDR selected: {os.path.basename(best_ldr_path)} (Score: {best_ldr_record['score']:.4f})")
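        # The exact metric definitions live in best_ldr.py; the assumption here is that
        # score_records returns a weighted combination of the per-metric values using
        # the weights above, sorted best-first so that index 0 is the chosen frame.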
# --- Step 2: Run Inference ---
print("Running Full Image (High-Res Scaled) Inference...")
# We only need the one set of options now
inference_options = {
'preprocess': 'scale_width',
'load_size': 1024, # Generate the high-resolution, full image
'crop_size': 728 # This value is ignored but required by the parser
}
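        # Assumption about the data pipeline: 'scale_width' resizes the image so its
        # width equals load_size while preserving the aspect ratio, giving a
        # full-frame (uncropped) result at higher resolution.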
# Deep copy the base options to avoid modifying the global state
local_opt = copy.deepcopy(opt)
for key, value in inference_options.items():
setattr(local_opt, key, value)
        # Run the model: create_dataset expects a directory of images, so the chosen
        # frame is copied into a throwaway temp folder that acts as the dataroot
        final_hdr_image = None
with tempfile.TemporaryDirectory() as temp_dir:
shutil.copy(best_ldr_path, temp_dir)
local_opt.dataroot = temp_dir
local_opt.num_test = 1
dataset = create_dataset(local_opt)
for i, data in enumerate(dataset):
model.set_input(data)
model.test()
visuals = model.get_current_visuals()
for label, image_tensor in visuals.items():
if label == 'fake':
image_numpy = (np.transpose(image_tensor.cpu().float().numpy()[0], (1, 2, 0)) + 1) / 2.0 * 255.0
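                        # The generator output is assumed to be tanh-normalized to [-1, 1];
                        # the line above shifts and scales it to [0, 255] before the uint8 cast.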
final_hdr_image = Image.fromarray(image_numpy.astype(np.uint8))
        if final_hdr_image is None:
            raise gr.Error("The model did not return a converted image for the selected LDR.")
        print("Conversion to HDR successful.")
        # Return the gallery of inputs and the single final HDR image
        return uploaded_filepaths, final_hdr_image
except Exception as e:
print(f"An error occurred: {e}")
raise gr.Error(f"An error occurred during processing: {e}")
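# Minimal usage sketch outside the UI (an assumption, not part of the app): the
# handler only needs objects that resolve to image paths, so it can be driven from
# a script with hypothetical file names like these:
#
#     from types import SimpleNamespace
#     uploads = [SimpleNamespace(name=p) for p in ("ldr_-2ev.jpg", "ldr_0ev.jpg", "ldr_+2ev.jpg")]
#     gallery_paths, hdr_image = process_images_to_hdr(uploads)
#     hdr_image.save("hdr_result.png")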
# --- Create and Launch the Gradio Interface ---
with gr.Blocks(theme=gr.themes.Soft(), css="footer {display: none !important}") as demo:
gr.Markdown(
"""
# LDR Bracketing to HDR Converter
Upload a set of bracketed LDR images. The app will automatically select the best one and convert it to a vibrant, full-resolution HDR image.
"""
)
with gr.Row():
with gr.Column(scale=1, min_width=350):
# --- INPUT ---
input_files = gr.Files(
label="Upload Bracketed LDR Images",
file_types=["image"]
)
process_button = gr.Button("Process Images", variant="primary")
with gr.Accordion("See Your Uploaded Images", open=True):
input_gallery = gr.Gallery(label="Uploaded Images", show_label=False, columns=[2, 3], height="auto")
with gr.Column(scale=2):
# --- OUTPUT ---
gr.Markdown("## Generated HDR Result")
output_image = gr.Image(label="Final HDR Image", type="pil", interactive=False, show_download_button=True)
process_button.click(
fn=process_images_to_hdr,
inputs=input_files,
outputs=[input_gallery, output_image]
)
# gr.Markdown("### Examples")
# gr.Examples(
# examples=[
# [
# "../pix2pix_dataset/testA/077A2406.jpg",
# "../pix2pix_dataset/testA/077A4049.jpg",
# "../pix2pix_dataset/testA/077A4073.jpg"
# ]
# ],
# inputs=input_files
# )
print("--- Launching Gradio App ---")
demo.launch(share=True)
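# Hosting note (assumption, not a requirement of this app): share=True tunnels the UI
# through Gradio's public share link; for a self-hosted deployment the standard
# demo.launch(server_name="0.0.0.0", server_port=7860) options can be used instead.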