# `import spaces` must come before any CUDA-touching import so that ZeroGPU
# can manage GPU allocation for the @spaces.GPU handler below.
import spaces

import os

import gradio as gr
import numpy as np
import torch
from PIL import Image
from diffusers import DDPMScheduler, AutoencoderKL
from torchvision import transforms
from torchvision.transforms.functional import to_pil_image
from transformers import (
    AutoTokenizer,
    CLIPImageProcessor,
    CLIPVisionModelWithProjection,
    CLIPTextModel,
    CLIPTextModelWithProjection,
)
from detectron2.data.detection_utils import convert_PIL_to_numpy, _apply_exif_orientation

import apply_net
from preprocess.humanparsing.run_parsing import Parsing
from preprocess.openpose.run_openpose import OpenPose
from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline
from src.unet_hacked_garmnet import UNet2DConditionModel as UNet2DConditionModel_ref
from src.unet_hacked_tryon import UNet2DConditionModel
from utils_mask import get_mask_location


def pil_to_binary_mask(pil_image, threshold=0):
    """Convert a PIL image to a binary 0/255 mask: grayscale pixels brighter
    than `threshold` become white (255), everything else black (0)."""
    grayscale_image = pil_image.convert("L")
    binary_mask = np.array(grayscale_image) > threshold
    # True pixels map to 255, False pixels to 0.
    mask = binary_mask.astype(np.uint8) * 255
    return Image.fromarray(mask)
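
# Example: when the user paints on the editor layer, pil_to_binary_mask turns
# any non-black stroke (threshold=0) into a white region of the inpaint mask.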


base_path = 'yisol/IDM-VTON'
example_path = os.path.join(os.path.dirname(__file__), 'example')
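
# All model components are loaded once at import time (in fp16) and are only
# moved onto the GPU inside the @spaces.GPU-decorated handler.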

unet = UNet2DConditionModel.from_pretrained(
    base_path,
    subfolder="unet",
    torch_dtype=torch.float16,
)
tokenizer_one = AutoTokenizer.from_pretrained(
    base_path,
    subfolder="tokenizer",
    revision=None,
    use_fast=False,
)
tokenizer_two = AutoTokenizer.from_pretrained(
    base_path,
    subfolder="tokenizer_2",
    revision=None,
    use_fast=False,
)
noise_scheduler = DDPMScheduler.from_pretrained(base_path, subfolder="scheduler")

text_encoder_one = CLIPTextModel.from_pretrained(
    base_path,
    subfolder="text_encoder",
    torch_dtype=torch.float16,
)
text_encoder_two = CLIPTextModelWithProjection.from_pretrained(
    base_path,
    subfolder="text_encoder_2",
    torch_dtype=torch.float16,
)
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    base_path,
    subfolder="image_encoder",
    torch_dtype=torch.float16,
)
vae = AutoencoderKL.from_pretrained(
    base_path,
    subfolder="vae",
    torch_dtype=torch.float16,
)
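
# These are the standard SDXL inpainting components (two text encoders and
# tokenizers, VAE, scheduler), plus a CLIP vision encoder used for the
# IP-Adapter garment-image conditioning further down.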

UNet_Encoder = UNet2DConditionModel_ref.from_pretrained(
    base_path,
    subfolder="unet_encoder",
    torch_dtype=torch.float16,
)

parsing_model = Parsing(0)
openpose_model = OpenPose(0)

# Inference only: freeze every component.
UNet_Encoder.requires_grad_(False)
image_encoder.requires_grad_(False)
vae.requires_grad_(False)
unet.requires_grad_(False)
text_encoder_one.requires_grad_(False)
text_encoder_two.requires_grad_(False)

tensor_transform = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)

pipe = TryonPipeline.from_pretrained(
    base_path,
    unet=unet,
    vae=vae,
    feature_extractor=CLIPImageProcessor(),
    text_encoder=text_encoder_one,
    text_encoder_2=text_encoder_two,
    tokenizer=tokenizer_one,
    tokenizer_2=tokenizer_two,
    scheduler=noise_scheduler,
    image_encoder=image_encoder,
    torch_dtype=torch.float16,
)
pipe.unet_encoder = UNet_Encoder
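
# IDM-VTON uses two UNets: `unet` (from unet_hacked_tryon) denoises the person
# image, while `unet_encoder` (the reference UNet from unet_hacked_garmnet)
# extracts garment features that condition the try-on UNet.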


@spaces.GPU
def start_tryon(human_img_dict, garm_img, garment_des, is_checked, is_checked_crop, denoise_steps, seed, category):
    """Main virtual try-on entry point.

    Args:
        human_img_dict: Image-editor value with the person photo under
            "background" and any hand-drawn mask layers under "layers".
        garm_img: Garment image.
        garment_des: Text description of the garment.
        is_checked: Whether to use the auto-generated (AI) mask.
        is_checked_crop: Whether to auto-crop and resize the input photo.
        denoise_steps: Number of denoising steps.
        seed: Random seed (-1 means random).
        category: Garment category.

    Returns:
        The generated try-on image and the grayscale mask preview.
    """
    device = "cuda"
    # Move everything onto the GPU granted for this call.
    openpose_model.preprocessor.body_estimation.model.to(device)
    pipe.to(device)
    pipe.unet_encoder.to(device)

    garm_img = garm_img.convert("RGB").resize((768, 1024))
    human_img_orig = human_img_dict["background"].convert("RGB")
    orig_size = human_img_orig.size

    if is_checked_crop:
        # Center-crop to a 3:4 aspect ratio before resizing to 768x1024.
        width, height = human_img_orig.size
        target_width = int(min(width, height * (3 / 4)))
        target_height = int(min(height, width * (4 / 3)))
        left = (width - target_width) / 2
        top = (height - target_height) / 2
        right = (width + target_width) / 2
        bottom = (height + target_height) / 2
        cropped_img = human_img_orig.crop((left, top, right, bottom))
        crop_size = cropped_img.size
        human_img = cropped_img.resize((768, 1024))
    else:
        human_img = human_img_orig.resize((768, 1024))

    if is_checked:
        # Auto-masking: run OpenPose and human parsing at 384x512, then build
        # the garment mask for the selected category.
        keypoints = openpose_model(human_img.resize((384, 512)))
        model_parse, _ = parsing_model(human_img.resize((384, 512)))
        mask, _ = get_mask_location('hd', category, model_parse, keypoints)
        mask = mask.resize((768, 1024))
    else:
        # Manual masking: use the layer the user painted in the image editor.
        mask = pil_to_binary_mask(human_img_dict['layers'][0].convert("RGB").resize((768, 1024)))

    # Grayscale preview of the person with the masked region blanked out.
    mask_gray = (1 - transforms.ToTensor()(mask)) * tensor_transform(human_img)
    mask_gray = to_pil_image((mask_gray + 1.0) / 2.0)

    # DensePose: render the pose-conditioning image with detectron2.
    human_img_arg = _apply_exif_orientation(human_img.resize((384, 512)))
    human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR")

    args = apply_net.create_argument_parser().parse_args(
        ('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml',
         './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v',
         '--opts', 'MODEL.DEVICE', 'cuda')
    )
    pose_img = args.func(args, human_img_arg)
    pose_img = pose_img[:, :, ::-1]  # BGR -> RGB
    pose_img = Image.fromarray(pose_img).resize((768, 1024))

    with torch.no_grad(), torch.cuda.amp.autocast():
        # Prompt for the try-on UNet: the person wearing the garment.
        prompt = "((best quality, masterpiece, ultra-detailed, high quality photography, photo realistic)), the model is wearing " + garment_des
        negative_prompt = "monochrome, lowres, bad anatomy, worst quality, normal quality, low quality, blurry, jpeg artifacts, sketch"
        with torch.inference_mode():
            (
                prompt_embeds,
                negative_prompt_embeds,
                pooled_prompt_embeds,
                negative_pooled_prompt_embeds,
            ) = pipe.encode_prompt(
                prompt,
                num_images_per_prompt=1,
                do_classifier_free_guidance=True,
                negative_prompt=negative_prompt,
            )

        # Prompt for the garment encoder: the garment on its own.
        prompt = "((best quality, masterpiece, ultra-detailed, high quality photography, photo realistic)), a photo of " + garment_des
        if not isinstance(prompt, list):
            prompt = [prompt]
        if not isinstance(negative_prompt, list):
            negative_prompt = [negative_prompt]
        with torch.inference_mode():
            (
                prompt_embeds_c,
                _,
                _,
                _,
            ) = pipe.encode_prompt(
                prompt,
                num_images_per_prompt=1,
                do_classifier_free_guidance=False,
                negative_prompt=negative_prompt,
            )

        pose_img = tensor_transform(pose_img).unsqueeze(0).to(device, torch.float16)
        garm_tensor = tensor_transform(garm_img).unsqueeze(0).to(device, torch.float16)
        # gr.Number delivers floats; cast to int and treat -1 as "random seed".
        generator = (
            torch.Generator(device).manual_seed(int(seed))
            if seed is not None and int(seed) != -1
            else None
        )

        # Run the try-on pipeline: inpaint the masked region of the person
        # image, conditioned on pose, garment image, and garment features.
        images = pipe(
            prompt_embeds=prompt_embeds.to(device, torch.float16),
            negative_prompt_embeds=negative_prompt_embeds.to(device, torch.float16),
            pooled_prompt_embeds=pooled_prompt_embeds.to(device, torch.float16),
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds.to(device, torch.float16),
            num_inference_steps=int(denoise_steps),
            generator=generator,
            strength=1.0,
            pose_img=pose_img.to(device, torch.float16),
            text_embeds_cloth=prompt_embeds_c.to(device, torch.float16),
            cloth=garm_tensor.to(device, torch.float16),
            mask_image=mask,
            image=human_img,
            height=1024,
            width=768,
            ip_adapter_image=garm_img.resize((768, 1024)),
            guidance_scale=2.0,
        )[0]

    # Map the 768x1024 result back to the original (or cropped) size.
    if is_checked_crop:
        return images[0].resize(crop_size), mask_gray.resize(crop_size)
    else:
        return images[0].resize(orig_size), mask_gray.resize(orig_size)


garm_list = os.listdir(os.path.join(example_path, "cloth"))
garm_list_path = [os.path.join(example_path, "cloth", garm) for garm in garm_list]

human_list = os.listdir(os.path.join(example_path, "human"))
human_list_path = [os.path.join(example_path, "human", human) for human in human_list]


def cap_examples(paths):
    """Show at most five examples; fall back to three or one for short lists."""
    if len(paths) >= 5:
        return paths[:5]
    if len(paths) >= 3:
        return paths[:3]
    return paths[:1]


garm_list_path = cap_examples(garm_list_path)
human_list_path = cap_examples(human_list_path)

human_ex_list = []
for ex_human in human_list_path:
    human_ex_list.append({
        'background': ex_human,
        'layers': None,
        'composite': None,
    })
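
# Each example dict mirrors the value structure gr.ImageEditor expects:
# a background image plus optional drawn layers and a composite.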


custom_css = """
.infomerica-header {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    padding: 20px;
    border-radius: 10px;
    margin-bottom: 20px;
    text-align: center;
    color: white;
}

.infomerica-logo {
    max-height: 60px;
    margin-bottom: 10px;
}

.infomerica-title {
    font-size: 2.5em;
    font-weight: bold;
    margin: 10px 0;
    text-shadow: 2px 2px 4px rgba(0,0,0,0.3);
}

.infomerica-subtitle {
    font-size: 1.2em;
    opacity: 0.9;
    margin-bottom: 10px;
}

.powered-by {
    font-size: 0.9em;
    opacity: 0.8;
    margin-top: 10px;
}

.footer-branding {
    text-align: center;
    margin-top: 30px;
    padding: 20px;
    background-color: #f8f9fa;
    border-radius: 10px;
    border: 1px solid #e9ecef;
}

.footer-links {
    display: flex;
    justify-content: center;
    gap: 20px;
    margin-top: 15px;
}

.footer-links a {
    color: #667eea;
    text-decoration: none;
    font-weight: 500;
}

.footer-links a:hover {
    color: #764ba2;
    text-decoration: underline;
}
"""

image_blocks = gr.Blocks(css=custom_css).queue()
with image_blocks as demo:
    gr.HTML("""
    <div class="infomerica-header">
        <img src="https://infomericainc.com/Content/images/logo.png" alt="Infomerica Logo" class="infomerica-logo">
        <h1 class="infomerica-title">AI Virtual Try-On</h1>
        <p class="infomerica-subtitle">Experience the future of fashion with our advanced AI technology</p>
        <p class="powered-by">Powered by Infomerica Inc.</p>
    </div>
    """)

    with gr.Column():
        with gr.Accordion(label="⚙️ Advanced Settings", open=False):
            with gr.Row():
                denoise_steps = gr.Number(label="Denoising Steps", minimum=20, maximum=40, value=30, step=1)
                seed = gr.Number(label="Seed", minimum=-1, maximum=2147483647, step=1, value=-1)

    with gr.Row():
        with gr.Column():
            imgs = gr.ImageEditor(
                sources='upload',
                type="pil",
                label='👤 Upload Your Photo',
                interactive=True,
                height=400
            )
            with gr.Row():
                is_checked = gr.Checkbox(
                    label="Auto-Masking",
                    info="✨ Use AI-generated mask (takes about 5 seconds)",
                    value=True
                )
            with gr.Row():
                category = gr.Dropdown(
                    choices=["upper_body", "lower_body", "dresses"],
                    label="👗 Clothing Category",
                    value="upper_body"
                )
            with gr.Row():
                is_checked_crop = gr.Checkbox(
                    label="Auto-Crop",
                    info="📐 Use automatic cropping & resizing",
                    value=False
                )

            gr.Examples(
                inputs=imgs,
                examples_per_page=5,
                examples=human_ex_list,
                label="📸 Sample Models"
            )

        with gr.Column():
            garm_img = gr.Image(
                label="👚 Upload Garment",
                sources='upload',
                type="pil",
                height=400
            )
            with gr.Row(elem_id="prompt-container"):
                with gr.Row():
                    prompt = gr.Textbox(
                        label="✏️ Describe the Garment",
                        placeholder="e.g., Blue denim jacket, Red summer dress, Black leather boots",
                        show_label=True,
                        elem_id="prompt"
                    )
            gr.Examples(
                inputs=garm_img,
                examples_per_page=5,
                examples=garm_list_path,
                label="👕 Sample Garments"
            )

        with gr.Column():
            masked_img = gr.Image(
                label="🎭 Processed Mask",
                elem_id="masked-img",
                show_share_button=False,
                height=400
            )

        with gr.Column():
            image_out = gr.Image(
                label="✨ Final Result",
                elem_id="output-img",
                show_share_button=False,
                height=400
            )

    with gr.Row():
        with gr.Column():
            try_button = gr.Button(value="🚀 Start Virtual Try-On", variant="primary", size="lg")

    gr.HTML("""
    <div class="footer-branding">
        <h3>About Infomerica Inc.</h3>
        <p>Leading the way in AI innovation and digital transformation solutions.</p>
        <div class="footer-links">
            <a href="https://infomericainc.com" target="_blank">🌐 Visit Our Website</a>
            <a href="https://infomericainc.com/about" target="_blank">ℹ️ About Us</a>
            <a href="https://infomericainc.com/contact" target="_blank">📞 Contact</a>
            <a href="https://infomericainc.com/services" target="_blank">🛠️ Our Services</a>
        </div>
        <p style="margin-top: 15px; font-size: 0.9em; color: #6c757d;">
            © 2024 Infomerica Inc. All rights reserved. | Transforming businesses through innovative AI solutions.
        </p>
    </div>
    """)

    try_button.click(
        fn=start_tryon,
        inputs=[imgs, garm_img, prompt, is_checked, is_checked_crop, denoise_steps, seed, category],
        outputs=[image_out, masked_img],
        api_name='tryon'
    )
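
# A minimal sketch of calling the `/tryon` endpoint remotely with
# gradio_client, assuming this app is deployed as a public Space (the Space
# id below is a placeholder, not part of this repo):
#
#   from gradio_client import Client, handle_file
#   client = Client("your-username/your-tryon-space")
#   result = client.predict(
#       {"background": handle_file("person.jpg"), "layers": [], "composite": None},
#       handle_file("garment.jpg"),   # garm_img
#       "blue denim jacket",          # garment_des
#       True,                         # is_checked (auto-mask)
#       False,                        # is_checked_crop
#       30,                           # denoise_steps
#       42,                           # seed
#       "upper_body",                 # category
#       api_name="/tryon",
#   )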

image_blocks.launch()