Raiff1982 committed on
Commit
7035fed
Β·
verified Β·
1 Parent(s): 694da97

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -29
app.py CHANGED
@@ -4,14 +4,14 @@ import imageio
4
  import torch
5
  import time
6
  import os
7
- import openai
8
  from transformers import pipeline
9
  from diffusers import DiffusionPipeline
10
 
11
- # ---------- Secret Key Setup ----------
12
- openai.api_key = os.getenv("OPENAI_KEY") # βœ… Hugging Face Space Secret
13
 
14
- # ---------- Config ----------
15
  AVAILABLE_MODELS = {
16
  "Codette Fine-Tuned (v9)": "ft:gpt-4.1-2025-04-14:raiffs-bits:codette-final:BO907H7Z",
17
  "GPT-2 (small, fast)": "gpt2",
@@ -22,9 +22,10 @@ AVAILABLE_MODELS = {
22
  device = "cuda" if torch.cuda.is_available() else "cpu"
23
  text_model_cache = {}
24
  chat_memory = {}
 
 
25
  MAX_PROMPTS_PER_SESSION = 5
26
  THROTTLE_SECONDS = 30
27
- last_usage_time = {}
28
 
29
  # ---------- Load Image Generator ----------
30
  try:
@@ -54,7 +55,7 @@ except Exception as e:
54
  video_pipeline = None
55
  video_enabled = False
56
 
57
- # ---------- Rate-Limited Terminal ----------
58
  def codette_terminal_limited(prompt, model_name, generate_image, generate_video, session_id, batch_size, video_steps, fps):
59
  if session_id not in chat_memory:
60
  chat_memory[session_id] = []
@@ -78,7 +79,7 @@ def codette_terminal_limited(prompt, model_name, generate_image, generate_video,
78
 
79
  if model_name == "Codette Fine-Tuned (v9)":
80
  try:
81
- response = openai.ChatCompletion.create(
82
  model=AVAILABLE_MODELS[model_name],
83
  messages=[{"role": "user", "content": prompt}],
84
  temperature=0.7,
@@ -86,7 +87,7 @@ def codette_terminal_limited(prompt, model_name, generate_image, generate_video,
86
  )
87
  output = response.choices[0].message.content.strip()
88
  except Exception as e:
89
- yield f"[OpenAI error]: {e}", None, None
90
  return
91
  else:
92
  if model_name not in text_model_cache:
@@ -100,11 +101,14 @@ def codette_terminal_limited(prompt, model_name, generate_image, generate_video,
100
  yield f"[Text model error]: {e}", None, None
101
  return
102
  try:
103
- output = text_model_cache[model_name](prompt, max_length=100, do_sample=True, num_return_sequences=1)[0]['generated_text'].strip()
 
 
104
  except Exception as e:
105
  yield f"[Generation error]: {e}", None, None
106
  return
107
 
 
108
  response_so_far = ""
109
  for char in output:
110
  response_so_far += char
@@ -117,7 +121,8 @@ def codette_terminal_limited(prompt, model_name, generate_image, generate_video,
117
  chat_memory[session_id].append(f"πŸ–‹οΈ You > {prompt}")
118
  chat_memory[session_id].append(f"🧠 Codette > {output}")
119
 
120
- imgs, vid = None, None
 
121
  if generate_image and image_enabled:
122
  try:
123
  result = image_generator(prompt, num_images_per_prompt=batch_size)
@@ -125,6 +130,8 @@ def codette_terminal_limited(prompt, model_name, generate_image, generate_video,
125
  except Exception as e:
126
  response_so_far += f"\n[Image error]: {e}"
127
 
 
 
128
  if generate_video and video_enabled:
129
  try:
130
  result = video_pipeline(prompt, num_inference_steps=video_steps)
@@ -139,8 +146,8 @@ def codette_terminal_limited(prompt, model_name, generate_image, generate_video,
139
 
140
  # ---------- Gradio UI ----------
141
  with gr.Blocks(title="🧬 Codette Terminal – Streamed AI Chat") as demo:
142
- gr.Markdown("## 🧬 Codette Terminal (Chat + Image + Video + Rate-Limited Access)")
143
- gr.Markdown("Type a prompt, select a model, and generate responses, images, or video. Type `'exit'` to reset.")
144
 
145
  with gr.Row():
146
  session_id = gr.Textbox(value="session_default", visible=False)
@@ -155,20 +162,4 @@ with gr.Blocks(title="🧬 Codette Terminal – Streamed AI Chat") as demo:
155
  video_steps_slider = gr.Slider(label="Video Inference Steps", minimum=10, maximum=100, step=10, value=50)
156
  fps_slider = gr.Slider(label="Video FPS", minimum=4, maximum=24, step=2, value=8)
157
 
158
- user_input = gr.Textbox(label="Your Prompt", placeholder="e.g. A robot dreaming on Mars", lines=1)
159
- output_text = gr.Textbox(label="Codette Output", lines=15, interactive=False)
160
- output_image = gr.Gallery(label="Generated Image(s)", columns=2)
161
- output_video = gr.Video(label="Generated Video")
162
-
163
- user_input.submit(
164
- codette_terminal_limited,
165
- inputs=[
166
- user_input, model_dropdown, generate_image_toggle, generate_video_toggle,
167
- session_id, batch_size_slider, video_steps_slider, fps_slider
168
- ],
169
- outputs=[output_text, output_image, output_video]
170
- )
171
-
172
- # ---------- Launch ----------
173
- if __name__ == "__main__":
174
- demo.launch(mcp_server=True) # βœ… tool-ready
 
4
  import torch
5
  import time
6
  import os
7
+ from openai import OpenAI
8
  from transformers import pipeline
9
  from diffusers import DiffusionPipeline
10
 
11
+ # ---------- Load OpenAI Key from HF Secrets ----------
12
+ client = OpenAI(api_key=os.getenv("OPENAI_KEY"))
13
 
14
+ # ---------- Configuration ----------
15
  AVAILABLE_MODELS = {
16
  "Codette Fine-Tuned (v9)": "ft:gpt-4.1-2025-04-14:raiffs-bits:codette-final:BO907H7Z",
17
  "GPT-2 (small, fast)": "gpt2",
 
22
  device = "cuda" if torch.cuda.is_available() else "cpu"
23
  text_model_cache = {}
24
  chat_memory = {}
25
+ last_usage_time = {}
26
+
27
  MAX_PROMPTS_PER_SESSION = 5
28
  THROTTLE_SECONDS = 30
 
29
 
30
  # ---------- Load Image Generator ----------
31
  try:
 
55
  video_pipeline = None
56
  video_enabled = False
57
 
58
+ # ---------- Main Terminal with Usage Limits ----------
59
  def codette_terminal_limited(prompt, model_name, generate_image, generate_video, session_id, batch_size, video_steps, fps):
60
  if session_id not in chat_memory:
61
  chat_memory[session_id] = []
 
79
 
80
  if model_name == "Codette Fine-Tuned (v9)":
81
  try:
82
+ response = client.chat.completions.create(
83
  model=AVAILABLE_MODELS[model_name],
84
  messages=[{"role": "user", "content": prompt}],
85
  temperature=0.7,
 
87
  )
88
  output = response.choices[0].message.content.strip()
89
  except Exception as e:
90
+ yield f"[OpenAI v1 error]: {e}", None, None
91
  return
92
  else:
93
  if model_name not in text_model_cache:
 
101
  yield f"[Text model error]: {e}", None, None
102
  return
103
  try:
104
+ output = text_model_cache[model_name](
105
+ prompt, max_length=100, do_sample=True, num_return_sequences=1
106
+ )[0]['generated_text'].strip()
107
  except Exception as e:
108
  yield f"[Generation error]: {e}", None, None
109
  return
110
 
111
+ # Stream the output
112
  response_so_far = ""
113
  for char in output:
114
  response_so_far += char
 
121
  chat_memory[session_id].append(f"πŸ–‹οΈ You > {prompt}")
122
  chat_memory[session_id].append(f"🧠 Codette > {output}")
123
 
124
+ # Image Generation
125
+ imgs = None
126
  if generate_image and image_enabled:
127
  try:
128
  result = image_generator(prompt, num_images_per_prompt=batch_size)
 
130
  except Exception as e:
131
  response_so_far += f"\n[Image error]: {e}"
132
 
133
+ # Video Generation
134
+ vid = None
135
  if generate_video and video_enabled:
136
  try:
137
  result = video_pipeline(prompt, num_inference_steps=video_steps)
 
146
 
147
  # ---------- Gradio UI ----------
148
  with gr.Blocks(title="🧬 Codette Terminal – Streamed AI Chat") as demo:
149
+ gr.Markdown("## 🧬 Codette Terminal (Chat + Image + Video + Fine-Tuned AI)")
150
+ gr.Markdown("Type a prompt, choose a model, and generate intelligent responses. Type `'exit'` to reset the session.")
151
 
152
  with gr.Row():
153
  session_id = gr.Textbox(value="session_default", visible=False)
 
162
  video_steps_slider = gr.Slider(label="Video Inference Steps", minimum=10, maximum=100, step=10, value=50)
163
  fps_slider = gr.Slider(label="Video FPS", minimum=4, maximum=24, step=2, value=8)
164
 
165
+ user_input = gr.Textbox(label="Your Prompt", placeholder="e._