# Visual try-on demo: segment a dress with SAM, then recolor it with
# Stable Diffusion inpainting, served through a Gradio interface.
import torch
import numpy as np
import gradio as gr
from segment_anything import sam_model_registry, SamPredictor
from diffusers import StableDiffusionInpaintPipeline
from PIL import Image
# Load SAM model (lightweight version)
# NOTE(review): expects the "vit_b" checkpoint file "sam_vit_b.pth" in the
# working directory; loading fails with FileNotFoundError otherwise — confirm
# the deployment ships it. Everything is pinned to CPU here.
sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b.pth").to("cpu")
# Predictor wraps the SAM model for point-prompted mask prediction.
predictor = SamPredictor(sam)
# Load Stable Diffusion Inpainting Model (Optimized for CPU)
# Downloads/caches the pretrained weights from the Hugging Face hub on first run.
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-inpainting"
).to("cpu")
def change_dress(image, color, point=(200, 200)):
    """Segment the dress in *image* with SAM and recolor it via inpainting.

    Args:
        image: Input photo; ``gr.Image(type="numpy")`` supplies a numpy
            array, but any ``np.asarray``-convertible image works.
        color: Target dress color — a hex string from the Gradio
            ColorPicker; interpolated verbatim into the inpainting prompt.
        point: ``(x, y)`` pixel assumed to lie on the dress, used as SAM's
            single positive click prompt. Defaults to ``(200, 200)``
            (the previously hard-coded value), so existing callers are
            unaffected; pass a different point for off-center garments.

    Returns:
        PIL.Image.Image: the image with the masked dress region
        re-generated in the requested color.
    """
    image = np.asarray(image)
    # Step 1: seed SAM with one positive click and take the single best mask.
    predictor.set_image(image)
    masks, _, _ = predictor.predict(
        point_coords=np.array([point]),
        point_labels=np.array([1]),  # 1 = foreground (positive) prompt
        multimask_output=False,
    )
    # Step 2: SAM yields boolean masks; inpainting expects white-on-black uint8.
    mask = Image.fromarray(masks[0].astype(np.uint8) * 255)
    # Step 3: regenerate only the masked region guided by the color prompt.
    result = pipe(
        prompt=f"A dress of {color}",
        image=Image.fromarray(image),
        mask_image=mask,
    ).images[0]
    return result
# Gradio UI wiring: a photo plus a color choice in, the recolored photo out.
_demo_inputs = [
    gr.Image(type="numpy"),
    gr.ColorPicker(label="Choose dress color"),
]
interface = gr.Interface(
    fn=change_dress,
    inputs=_demo_inputs,
    outputs=gr.Image(),
    title="Fast Visual Try-On",
    description="Segment the dress and change its color in under 20 seconds using AI.",
)
if __name__ == "__main__":
    # Start the Gradio server only when run as a script, not on import.
    # (Fixes a stray trailing "|" artifact that made this line a SyntaxError.)
    interface.launch()