Update app.py
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 import tempfile
 import imageio
 import torch
+import time
 from transformers import pipeline
 from diffusers import DiffusionPipeline
 
@@ -75,7 +76,7 @@ def codette_terminal(prompt, model_name, generate_image, generate_video, session
         yield f"[Text generation error]: {e}", None, None
         return
 
-    # Stream the output
+    # Stream the output
     response_so_far = ""
     for char in output:
         response_so_far += char
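For context, a minimal sketch of the streaming pattern this section of the handler relies on (a simplified stand-in, not the Space's actual code): Gradio treats a generator function as a streaming event handler, so each yield replaces the output component's value, and a short sleep makes the character-by-character effect visible.

    import time
    import gradio as gr

    def stream_reply(prompt: str):
        # Stand-in reply; the real app gets this from a transformers pipeline.
        output = f"Echo: {prompt}"
        response_so_far = ""
        for char in output:
            response_so_far += char
            yield response_so_far      # each partial string updates the Textbox
            time.sleep(0.01)           # module-level import, as in the hunks here

    with gr.Blocks() as demo:
        box_in = gr.Textbox(label="Prompt")
        box_out = gr.Textbox(label="Reply")
        box_in.submit(stream_reply, inputs=box_in, outputs=box_out)

    demo.launch()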
@@ -83,14 +84,12 @@ def codette_terminal(prompt, model_name, generate_image, generate_video, session
         temp_log.append(f"🗣️ You > {prompt}")
         temp_log.append(f"🧠 Codette > {response_so_far}")
         yield "\n".join(temp_log[-10:]), None, None
-        import time
         time.sleep(0.01)
 
     # Finalize chat memory
     chat_memory[session_id].append(f"🗣️ You > {prompt}")
     chat_memory[session_id].append(f"🧠 Codette > {output}")
 
-    # Image Generation
     imgs = None
     if generate_image and image_enabled:
         try:
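The image branch opened at the end of this hunk is truncated in the diff. As a hedged sketch of what generate_image presumably triggers (the checkpoint name and the helper below are assumptions, not taken from the diff): a diffusers pipeline can return several PIL images per prompt via num_images_per_prompt, and that list can be handed straight to gr.Gallery.

    import torch
    from diffusers import DiffusionPipeline

    # Assumed checkpoint; the Space may load a different model.
    image_pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        torch_dtype=torch.float16,
    ).to("cuda")

    def make_images(prompt: str, batch_size: int):
        # Returns a list of PIL images, or None on failure (mirroring the [Image error] handling).
        try:
            return image_pipe(prompt, num_images_per_prompt=batch_size).images
        except Exception as e:
            print(f"[Image error]: {e}")
            return None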
@@ -99,7 +98,6 @@ def codette_terminal(prompt, model_name, generate_image, generate_video, session
         except Exception as e:
             response_so_far += f"\n[Image error]: {e}"
 
-    # Video Generation
     vid = None
     if generate_video and video_enabled:
         try:
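The video branch is likewise truncated. Given the module imports (tempfile, imageio) and the FPS and step sliders added in the next hunk, one plausible shape of the frame-writing step is sketched below; the helper name and the assumption that the pipeline hands back a list of uint8 frames are mine, not the diff's.

    import tempfile
    import imageio

    def frames_to_mp4(frames, fps: int) -> str:
        # frames: list of HxWx3 uint8 arrays from a text-to-video pipeline (assumed format).
        # Writes a temporary .mp4 that gr.Video can display and returns its path.
        tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
        imageio.mimsave(tmp.name, frames, fps=fps)
        return tmp.name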
@@ -118,25 +116,31 @@ with gr.Blocks(title="🧬 Codette Terminal – Streamed AI Chat") as demo:
     gr.Markdown("## 🧬 Codette Terminal (Chat + Image + Video + Batch + NSFW OK)")
     gr.Markdown("Type a prompt, select your model, and configure generation options. Type `'exit'` to reset.")
 
-
-
-
-
-
-
-
+    with gr.Row():
+        session_id = gr.Textbox(value="session_default", visible=False)
+        model_dropdown = gr.Dropdown(choices=list(AVAILABLE_MODELS.keys()), value="GPT-2 (small, fast)", label="Language Model")
+
+    with gr.Row():
+        generate_image_toggle = gr.Checkbox(label="Generate Image(s)?", value=False, interactive=image_enabled)
+        generate_video_toggle = gr.Checkbox(label="Generate Video?", value=False, interactive=video_enabled)
+
+    with gr.Row():
+        batch_size_slider = gr.Slider(label="Number of Images", minimum=1, maximum=4, step=1, value=1)
+        video_steps_slider = gr.Slider(label="Video Inference Steps", minimum=10, maximum=100, step=10, value=50)
+        fps_slider = gr.Slider(label="Video FPS", minimum=4, maximum=24, step=2, value=8)
+
     user_input = gr.Textbox(label="Your Prompt", placeholder="e.g. A robot dreaming on Mars", lines=1)
     output_text = gr.Textbox(label="Codette Output", lines=15, interactive=False)
-    output_image = gr.Gallery(label="Generated Image(s)")
+    output_image = gr.Gallery(label="Generated Image(s)", columns=2)
     output_video = gr.Video(label="Generated Video")
 
     user_input.submit(
         codette_terminal,
-        inputs=[
-
-
-
-
+        inputs=[
+            user_input, model_dropdown, generate_image_toggle, generate_video_toggle,
+            session_id, batch_size_slider, video_steps_slider, fps_slider
+        ],
+        outputs=[output_text, output_image, output_video]
     )
 
 # ---------- Launch ----------
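The new inputs list fixes the positional mapping onto codette_terminal. The hunk headers truncate the definition after "session", so the parameter names after generate_video in the sketch below are guesses; what the diff does establish is that eight components feed the handler and that every yield is a 3-tuple matching outputs=[output_text, output_image, output_video].

    # Positional mapping implied by inputs=[...]; names after generate_video are assumed.
    def codette_terminal(prompt, model_name, generate_image, generate_video,
                         session_id, batch_size, video_steps, fps):
        # Every yield must be a 3-tuple: (chat text, gallery images or None, video path or None).
        yield "", None, None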