File size: 1,586 Bytes
57c3927
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dc2b46e
57c3927
 
ce7a626
57c3927
 
ce7a626
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import gradio as gr
import torch
import numpy as np
from PIL import Image
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from diffusers.utils import make_image_grid
import cv2

# Module-level model setup: load the Canny-conditioned ControlNet and the
# Stable Diffusion v1.5 pipeline once at import time so every request reuses them.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet
)
# Swap in the UniPC multistep scheduler (faster convergence than the default),
# configured from the pipeline's existing scheduler settings.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# CPU-only inference (no CUDA assumed); expect slow generation times.
pipe.to("cpu")

def generate_image(input_image, text_prompt, low_threshold=100, high_threshold=200):
    """Transform an uploaded image with Canny-edge-conditioned Stable Diffusion.

    Args:
        input_image: PIL image supplied by the Gradio widget.
        text_prompt: Text prompt guiding the diffusion pipeline.
        low_threshold: Lower hysteresis threshold for cv2.Canny (default 100).
        high_threshold: Upper hysteresis threshold for cv2.Canny (default 200).

    Returns:
        A PIL image: a 1x3 grid of (original, canny edge map, generated image).
    """
    # Normalize to 3-channel RGB first: Gradio may deliver RGBA or palette
    # images, which would break the channel stacking below.
    rgb_image = input_image.convert("RGB")
    original_array = np.array(rgb_image)
    edges = cv2.Canny(original_array, low_threshold, high_threshold)
    # Replicate the single-channel edge map into 3 channels — the ControlNet
    # conditioning image is expected to be RGB.
    canny_image = np.concatenate([edges[:, :, None]] * 3, axis=2)
    canny_image_pil = Image.fromarray(canny_image)
    # CPU inference; this call dominates the request latency.
    output_image = pipe(text_prompt, image=canny_image_pil).images[0]
    result_grid = make_image_grid([rgb_image, canny_image_pil, output_image], rows=1, cols=3)
    return result_grid

# Gradio front end: image + prompt in, one grid image out.
with gr.Blocks() as demo:
    gr.Markdown("# Image Transformation with ControlNet and Stable Diffusion (CPU Only)")
    with gr.Row():
        with gr.Column():
            uploaded_image = gr.Image(type="pil", label="Upload Image")
            prompt_box = gr.Textbox(label="Enter a prompt for the transformation")
    run_button = gr.Button("Generate Image")
    output_display = gr.Image(label="Result")
    # Wire the button to the inference function.
    run_button.click(
        fn=generate_image,
        inputs=[uploaded_image, prompt_box],
        outputs=output_display,
    )

demo.launch()