fix image_to_3d
app.py
CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 import spaces
 from gradio_litmodel3d import LitModel3D
-
+import json
 import os
 import shutil
 os.environ['SPCONV_ALGO'] = 'native'
@@ -119,7 +119,7 @@ def image_to_3d(
     slat_sampling_steps: int,
     multiimage_algo: Literal["multidiffusion", "stochastic"],
     req: gr.Request,
-) -> Tuple[dict, str]:
+) -> Tuple[dict, str, str]:
     """
     Convert an image (or multiple images) into a 3D model and return its state and video.
 
@@ -135,9 +135,10 @@ def image_to_3d(
         multiimage_algo (str): Multi-image algorithm to use.
 
     Returns:
-        dict:
-
-        str:
+        dict: The information of the generated 3D model.
+        str: The path to the video of the 3D model.
+        str: serialized JSON of state
+
     """
     user_dir = os.path.join(TMP_DIR, str(req.session_hash))
     os.makedirs(user_dir, exist_ok=True)
@@ -187,7 +188,7 @@ def image_to_3d(
     # Pack state for downstream use
     state = pack_state(outputs['gaussian'][0], outputs['mesh'][0])
     torch.cuda.empty_cache()
-    return state, video_path
+    return state, video_path, json.dumps(state)
 
 
 
@@ -324,7 +325,7 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
 
     is_multiimage = gr.State(False)
     output_buf = gr.State()
-
+    state_textbox = gr.Textbox(visible=False, label="Serialized State")
 
     # Example images at the bottom of the page
     with gr.Row() as single_image_example:
@@ -384,7 +385,7 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
             ss_guidance_strength, ss_sampling_steps,
             slat_guidance_strength, slat_sampling_steps, multiimage_algo
         ],
-        outputs=[output_buf, video_output],
+        outputs=[output_buf, video_output, state_textbox],  # multi output
     ).then(
         lambda: tuple([gr.Button(interactive=True), gr.Button(interactive=True)]),
        outputs=[extract_glb_btn, extract_gs_btn],