demo_gradio.py  CHANGED  (+30 -3)
@@ -90,7 +90,34 @@ transform = Compose(
 model.eval()
 
 @spaces.GPU()
-def 
+def infer(input_image, material_exemplar, progress=gr.Progress(track_tqdm=True)):
+    """
+    Perform zero-shot material transfer from a single input image and a material exemplar image.
+
+    This function uses a combination of a depth estimation model (DPT), foreground/background separation,
+    grayscale stylization, and IP-Adapter + ControlNet with Stable Diffusion XL to generate an output image
+    in which the material style from the exemplar image is applied to the input image's object.
+
+    Args:
+        input_image (PIL.Image): The original image containing the object to which the new material will be applied.
+        material_exemplar (PIL.Image): A reference image whose material (texture, reflectance, etc.) is to be transferred to the object in the input image.
+        progress (gradio.Progress, optional): Tracks the progress bar in the Gradio UI; the default enables tqdm tracking.
+
+    Returns:
+        PIL.Image: The output image showing the object from `input_image` rendered with the material of `material_exemplar`.
+
+    Steps:
+        1. Compute a depth map from `input_image` using a DPT-based model.
+        2. Remove the background from the input image to isolate the object and convert it to grayscale.
+        3. Combine and align the input image, depth map, and mask for use with the IP-Adapter + ControlNet SDXL pipeline.
+        4. Use the `IPAdapterXL.generate()` function to synthesize a new image, guiding generation with:
+           - `material_exemplar` for style/material guidance
+           - the grayscale `input_image` for structure/content
+           - the estimated depth map for spatial layout
+           - the mask for region-specific conditioning (object only)
+        5. Return the first image in the generated list as the final material transfer result.
+    """
+
 
     """
     Compute depth map from input_image
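
Step 1 of the new docstring corresponds to the depth-estimation block the context lines above lead into. A minimal sketch of that step, assuming the Intel/dpt-hybrid-midas checkpoint from transformers (the demo itself builds its own MiDaS-style `transform = Compose(...)` pipeline instead):

import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

# Sketch only: the checkpoint and processor are assumptions, not the demo's code.
processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").eval()

def estimate_depth(input_image: Image.Image) -> Image.Image:
    inputs = processor(images=input_image, return_tensors="pt")
    with torch.no_grad():
        predicted_depth = depth_model(**inputs).predicted_depth
    # Resize back to the input resolution and normalize to an 8-bit image.
    depth = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1),
        size=input_image.size[::-1],  # PIL size is (W, H); interpolate expects (H, W)
        mode="bicubic",
        align_corners=False,
    ).squeeze()
    depth = (depth - depth.min()) / (depth.max() - depth.min())
    return Image.fromarray((depth * 255).byte().cpu().numpy())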
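Step 2 (foreground isolation and grayscale conversion) can be sketched as below; rembg is an assumption here, standing in for whatever background-removal model the demo actually uses:

from PIL import Image, ImageOps
from rembg import remove  # assumption: any matting/segmentation model works here

def isolate_object(input_image: Image.Image):
    cutout = remove(input_image)   # RGBA image with a transparent background
    mask = cutout.split()[-1]      # the alpha channel doubles as the object mask
    gray = ImageOps.grayscale(input_image).convert("RGB")
    return gray, mask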
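Steps 3-5 then come down to one `IPAdapterXL.generate()` call over an SDXL + depth-ControlNet pipeline. A sketch assuming the tencent-ailab/IP-Adapter wrapper and public diffusers checkpoints; the demo's exact model paths and generate() arguments may differ:

import torch
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline
from ip_adapter import IPAdapterXL  # tencent-ailab/IP-Adapter

# Model IDs below are assumptions, not necessarily the demo's checkpoints.
controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16
)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")
ip_model = IPAdapterXL(
    pipe,
    "sdxl_models/image_encoder",              # CLIP image encoder weights
    "sdxl_models/ip-adapter_sdxl_vit-h.bin",  # IP-Adapter checkpoint
    "cuda",
)

def material_transfer(material_exemplar, depth_map):
    # material_exemplar steers style; depth_map (Step 1) steers spatial layout.
    # The grayscale object and mask from Step 2 would be threaded through the
    # pipeline kwargs, which is pipeline-specific and elided here.
    images = ip_model.generate(
        pil_image=material_exemplar,
        image=depth_map,
        num_samples=1,
        num_inference_steps=30,
        seed=42,
    )
    return images[0]  # Step 5: the first generated image is the result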
@@ -209,6 +236,6 @@ with gr.Blocks(css=css) as demo:
         )
         with gr.Column():
             output_image = gr.Image(label="transfer result")
-    submit_btn.click(fn=
+    submit_btn.click(fn=infer, inputs=[input_image, input_image2], outputs=[output_image])
 
-demo.queue().launch(show_error=True)
+demo.queue().launch(show_error=True, ssr_mode=False, mcp_server=True)
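
This second hunk wires the renamed handler into the UI and turns the Space into an MCP server. With mcp_server=True, Gradio exposes the click handler as an MCP tool and uses the function's docstring as the tool description, which is presumably why this change adds such a detailed docstring to infer(). A self-contained sketch of the surrounding wiring, with placeholder components and a stub infer(); gradio>=5 with the [mcp] extra is assumed:

import gradio as gr

def infer(input_image, material_exemplar, progress=gr.Progress(track_tqdm=True)):
    """Stub standing in for the material-transfer pipeline above."""
    return input_image

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(label="input image", type="pil")
            input_image2 = gr.Image(label="material exemplar", type="pil")
            submit_btn = gr.Button("Submit")
        with gr.Column():
            output_image = gr.Image(label="transfer result")
    submit_btn.click(fn=infer, inputs=[input_image, input_image2], outputs=[output_image])

# ssr_mode=False opts out of Gradio 5's server-side rendering; mcp_server=True
# additionally serves the app's functions over the Model Context Protocol.
demo.queue().launch(show_error=True, ssr_mode=False, mcp_server=True)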