# LayoutPainter/app.py
import os

import gradio as gr
# import spaces  # [uncomment to use ZeroGPU]
import torch
from diffusers import EulerDiscreteScheduler
from gradio_image_annotation import image_annotator
from huggingface_hub import hf_hub_download

from migc.migc_pipeline import StableDiffusionMIGCPipeline, MIGCProcessor, AttentionStore
from migc.migc_utils import load_migc, seed_everything
# Download the MIGC checkpoint and the Realistic Vision base weights.
migc_ckpt_path = hf_hub_download(
    repo_id="limuloo1999/MIGC",
    filename="MIGC_SD14.ckpt",
    repo_type="model",  # optional; "model" is the default
)
RV_path = hf_hub_download(
    repo_id="SG161222/Realistic_Vision_V6.0_B1_noVAE",
    filename="Realistic_Vision_V6.0_NV_B1.safetensors",
    repo_type="model",  # optional; "model" is the default
)
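# MIGC (Multi-Instance Generation Controller) injects per-instance
# cross-attention control into a Stable Diffusion 1.x UNet (hence the
# SD14 checkpoint), so each bounding box can be driven by its own phrase.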
# Load the base model from the downloaded single-file checkpoint.
# The commented variant below loads the same weights straight from the Hub repo:
# pipe = StableDiffusionMIGCPipeline.from_pretrained(
#     "SG161222/Realistic_Vision_V6.0_B1_noVAE",
#     torch_dtype=torch.float32,
# )
pipe = StableDiffusionMIGCPipeline.from_single_file(
    RV_path,
    torch_dtype=torch.float32,
)
pipe.safety_checker = None  # disable the safety checker for this demo
pipe.attention_store = AttentionStore()  # MIGC records cross-attention maps here
# Attach the MIGC attention processors to the UNet.
load_migc(pipe.unet, pipe.attention_store,
          migc_ckpt_path, attn_processor=MIGCProcessor)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
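# Optional (an assumption, not in the original app): on memory-constrained GPUs
# you could enable diffusers' attention slicing here, assuming
# StableDiffusionMIGCPipeline inherits it from the diffusers base pipeline:
#     pipe.enable_attention_slicing()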
example_annotation = {
    "image": os.path.join(os.path.dirname(__file__), "background.png"),
    "boxes": [],
}
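# Note: background.png (shipped next to app.py) only provides the canvas the
# boxes are drawn on; generation below uses just the box geometry and labels,
# not the background pixels.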
# @spaces.GPU
def get_boxes_json(annotations):
    """Normalize the drawn boxes to [0, 1] and run MIGC layout-to-image generation."""
    print(annotations)
    image = annotations["image"]
    height, width = image.shape[:2]
    boxes = annotations["boxes"]
    # MIGC takes a nested list: one global prompt followed by one phrase per
    # instance, plus a matching list of normalized [xmin, ymin, xmax, ymax] boxes.
    prompt_final = [[]]
    bboxes = [[]]
    for box in boxes:
        box["xmin"] = box["xmin"] / width
        box["xmax"] = box["xmax"] / width
        box["ymin"] = box["ymin"] / height
        box["ymax"] = box["ymax"] / height
        prompt_final[0].append(box["label"])
        bboxes[0].append([box["xmin"], box["ymin"], box["xmax"], box["ymax"]])
    # The global prompt is the comma-joined list of instance labels.
    prompt = ", ".join(prompt_final[0])
    prompt_final[0].insert(0, prompt)
    negative_prompt = "worst quality, low quality, bad anatomy, watermark, text, blurry"
    image = pipe(prompt_final, bboxes, num_inference_steps=30, guidance_scale=7.5,
                 MIGCsteps=15, aug_phase_with_and=False, negative_prompt=negative_prompt).images[0]
    return image
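# For reference, the annotator payload consumed above looks roughly like this
# (field names as used in get_boxes_json; the values are illustrative):
#     {"image": <H x W x 3 ndarray>,
#      "boxes": [{"xmin": 64, "ymin": 64, "xmax": 256, "ymax": 256,
#                 "label": "a red apple"}]}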
with gr.Blocks() as demo:
    with gr.Tab("DreamRenderer", id="DreamRenderer"):
        with gr.Row():
            with gr.Column(scale=1):
                annotator = image_annotator(
                    example_annotation,
                    height=512,
                    width=512,
                )
            with gr.Column(scale=1):
                generated_image = gr.Image(label="Generated Image", height=512, width=512)
        button_get = gr.Button("Generate")
        button_get.click(get_boxes_json, inputs=annotator, outputs=generated_image)
if __name__ == "__main__":
    demo.launch()