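"""Dummy Gradio app for the Finegrain Product Placement LoRA Space.

Model loading and inference are stubbed out: every handler returns
placeholder images so the UI can be exercised without the real pipeline.
"""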
import io
import os
import random
from functools import cache, lru_cache
from typing import Any

import gradio as gr
from PIL import Image, ImageDraw, ImageFont

# Dummy initialization - no actual model loading
DEVICE_CPU = "cpu"
DTYPE = "dummy_dtype"
FG_API_KEY = os.getenv("FG_API_KEY", "dummy_api_key")
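# NOTE: bool(FG_API_KEY) gates the "Cut out reference" checkbox defined below;
# the dummy default keeps it enabled even when no real key is set.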
# Dummy model and prompt objects
class DummyModel:
    def to(self, device, dtype):
        return self


class DummyPrompt:
    def to(self, device, dtype):
        return self


model = DummyModel()
prompt = DummyPrompt()

@cache
def _ctx():
    # Dummy context standing in for the real inference context; @cache ensures
    # it is only created once
    class DummyContext:
        def reset(self):
            pass

        def run_one_sync(self, func, *args):
            # Return a dummy RGBA cutout image
            return Image.new("RGBA", (200, 200), (255, 0, 0, 128))

    return DummyContext()

def on_change(scene: dict[str, Any] | None, reference: Image.Image | None) -> tuple[dict[str, Any], str]:
    # Note: the UI below wires dummy_on_change instead, so this handler is currently unused
    bbox_str = ""
    if scene is not None and isinstance(scene.get("boxes"), list) and len(scene["boxes"]) == 1:
        box = scene["boxes"][0]
        bbox_str = f"({box['xmin']}, {box['ymin']}, {box['xmax']}, {box['ymax']})"
    return (gr.update(interactive=reference is not None and bbox_str != ""), bbox_str)

def create_dummy_image(width: int = 512, height: int = 512, color: tuple = (100, 150, 200), text: str = "Dummy Output") -> Image.Image:
    """Create a solid-color placeholder image with centered text."""
    img = Image.new("RGB", (width, height), color)
    draw = ImageDraw.Draw(img)
    # Try to use a TrueType font; fall back to the built-in default if unavailable
    try:
        font = ImageFont.truetype("arial.ttf", 24)
    except OSError:
        font = ImageFont.load_default()
    # Measure the text bounding box so the text can be centered
    bbox = draw.textbbox((0, 0), text, font=font)
    text_width = bbox[2] - bbox[0]
    text_height = bbox[3] - bbox[1]
    x = (width - text_width) // 2
    y = (height - text_height) // 2
    draw.text((x, y), text, fill=(255, 255, 255), font=font)
    return img

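# Example: create_dummy_image(256, 256, (0, 128, 0), "Preview") yields a
# 256x256 green RGB image with "Preview" centered in white.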
def _process(
    scene: dict[str, Any],
    reference: Image.Image,
    seed: int = 1234,
) -> tuple[tuple[Image.Image, Image.Image], Image.Image, Image.Image]:
    """Dummy processing function that returns placeholder images."""
    # Use the scene image if present, otherwise fabricate one
    if isinstance(scene.get("image"), Image.Image):
        scene_image = scene["image"]
    else:
        scene_image = create_dummy_image(512, 512, (150, 100, 200), "Dummy Scene")
    # Create a randomly colored dummy output image
    output = create_dummy_image(
        scene_image.width,
        scene_image.height,
        (random.randint(50, 200), random.randint(50, 200), random.randint(50, 200)),
        f"Processed (seed: {seed})",
    )
    # Create dummy reference and scene outputs for the debug panel
    reference_output = reference.copy() if reference is not None else create_dummy_image(200, 200, (255, 100, 100), "Ref")
    scene_output = scene_image.copy()
    before_after = (scene_image.resize(output.size), output)
    return (before_after, reference_output, scene_output)

@lru_cache(maxsize=32)
def _cutout_reference(image_bytes: bytes) -> Image.Image:
    """Dummy cutout, keyed on PNG bytes because PIL images are not hashable."""
    # Return a simple placeholder cutout (RGB color, since create_dummy_image builds RGB images)
    return create_dummy_image(200, 200, (255, 0, 0), "Cutout")

def cutout_reference(reference: Image.Image) -> Image.Image:
    """Dummy cutout wrapper: serializes to PNG bytes so results can be lru-cached."""
    if reference is not None:
        buf = io.BytesIO()
        reference.save(buf, format="PNG")
        return _cutout_reference(buf.getvalue())
    return create_dummy_image(200, 200, (255, 0, 0), "No Reference")

def process(
    scene: dict[str, Any],
    reference: Image.Image,
    seed: int = 1234,
    cut_out_reference: bool = False,
) -> tuple[tuple[Image.Image, Image.Image], Image.Image, Image.Image]:
    """Main dummy processing function."""
    if cut_out_reference and reference is not None:
        reference = cutout_reference(reference)
    return _process(scene, reference, seed)

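# NOTE (assumption): in the real Space, process() is presumably where the
# Flux Kontext + product placement LoRA pipeline would run; this stub only
# fabricates placeholder images so the UI stays interactive.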
TITLE = """
<h1>Finegrain Product Placement LoRA</h1>
<p>
🧪 An experiment to extend Flux Kontext with product placement capabilities.
The LoRA was trained on EditNet, our before/after image editing dataset.
</p>
<p>
Just draw a box to set where the subject should be blended, and at what size.
</p>
<p>
<a href="https://huggingface.co/finegrain/finegrain-product-placement-lora">Model Card</a> |
<a href="https://blog.finegrain.ai/posts/product-placement-flux-lora-experiment/">Blog Post</a> |
<a href="https://finegrain.ai/editnet">EditNet</a>
</p>
"""
with gr.Blocks() as demo:
    gr.HTML(TITLE)
    with gr.Row():
        with gr.Column():
            scene = gr.Image(
                label="Scene",
                type="pil",
                image_mode="RGB",
            )
            reference = gr.Image(
                label="Product Reference",
                visible=True,
                interactive=True,
                type="pil",
                image_mode="RGBA",
            )
            with gr.Accordion("Options", open=False):
                seed = gr.Slider(
                    minimum=0,
                    maximum=10_000,
                    value=1234,
                    step=1,
                    label="Seed",
                )
                cut_out_reference = gr.Checkbox(
                    label="Cut out reference",
                    value=bool(FG_API_KEY),
                    interactive=bool(FG_API_KEY),
                )
            with gr.Row():
                run_btn = gr.Button(value="Blend", interactive=True)
        with gr.Column():
            output_image = gr.Image(label="Output Image")
            with gr.Accordion("Debug", open=False):
                output_textbox = gr.Textbox(label="Bounding Box", interactive=False)
                output_reference = gr.Image(
                    label="Reference",
                    visible=True,
                    interactive=False,
                    type="pil",
                    image_mode="RGB",
                )
                output_scene = gr.Image(
                    label="Scene",
                    visible=True,
                    interactive=False,
                    type="pil",
                    image_mode="RGB",
                )
    # Dummy change handler for the scene and reference inputs
    def dummy_on_change(scene, reference):
        return gr.update(interactive=scene is not None and reference is not None), "Dummy bbox (100, 100, 200, 200)"

    # Watch for changes on scene and reference
    scene.change(fn=dummy_on_change, inputs=[scene, reference], outputs=[run_btn, output_textbox])
    reference.change(fn=dummy_on_change, inputs=[scene, reference], outputs=[run_btn, output_textbox])
    def dummy_process_wrapper(scene, reference, seed, cut_out_reference):
        """Adapt the raw gr.Image inputs to the dummy process function."""
        if scene is None or reference is None:
            # Return placeholder images if inputs are missing
            dummy_img = create_dummy_image(512, 512, (100, 100, 100), "No Input")
            return dummy_img, dummy_img, dummy_img
        # Wrap the scene image in the dict format process() expects, with a fixed dummy box
        scene_dict = {"image": scene, "boxes": [{"xmin": 100, "ymin": 100, "xmax": 200, "ymax": 200}]}
        before_after, ref_out, scene_out = process(scene_dict, reference, seed, cut_out_reference)
        # The "after" half of the before/after pair is the main output
        return before_after[1], ref_out, scene_out
    run_btn.click(
        fn=dummy_process_wrapper,
        inputs=[scene, reference, seed, cut_out_reference],
        outputs=[output_image, output_reference, output_scene],
    )
    # Create dummy examples with placeholder images
    def create_dummy_examples():
        examples = []
        colors = [(255, 100, 100), (100, 255, 100), (100, 100, 255), (255, 255, 100), (255, 100, 255)]
        names = ["Sunglasses", "Kitchen", "Glass", "Chair", "Lantern"]
        for color, name in zip(colors, names):
            scene_img = create_dummy_image(400, 400, color, f"Scene {name}")
            ref_img = create_dummy_image(200, 200, tuple(c // 2 for c in color), f"Ref {name}")
            examples.append([scene_img, ref_img])
        return examples

    ex = gr.Examples(
        examples=create_dummy_examples(),
        inputs=[scene, reference],
        outputs=[output_image, output_reference, output_scene],
        fn=dummy_process_wrapper,
    )
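    # Note: without cache_examples=True (or run_on_click=True), selecting an example
    # only fills the inputs; fn and outputs are used when example caching is enabled.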
if __name__ == "__main__":
    demo.launch(debug=True)