Raiff1982 committed · Commit 3c68fa9 · verified · 1 parent: edee20e

Update app.py

Files changed (1): app.py +51 -33
app.py CHANGED
@@ -3,19 +3,17 @@ import tempfile
 import imageio
 import torch
 import time
+import os
+import openai
 from transformers import pipeline
 from diffusers import DiffusionPipeline

+# ---------- Secret Key Setup ----------
+openai.api_key = os.getenv("OPENAI_KEY")  # ✅ Hugging Face Space Secret

-
-user_input.submit(
-    codette_terminal_limited,  # <== use your chosen function name here
-    inputs=[user_input, model_dropdown, generate_image_toggle, generate_video_toggle, session_id, batch_size_slider, video_steps_slider, fps_slider],
-    outputs=[output_text, output_image, output_video]
-)
-
-# ---------- Configuration ----------
+# ---------- Config ----------
 AVAILABLE_MODELS = {
+    "Codette Fine-Tuned (v9)": "ft:gpt-4.1-2025-04-14:raiffs-bits:codette-final:BO907H7Z",
     "GPT-2 (small, fast)": "gpt2",
     "Falcon (TII UAE)": "tiiuae/falcon-7b-instruct",
     "Mistral (OpenAccess)": "mistralai/Mistral-7B-v0.1"
@@ -24,6 +22,9 @@ AVAILABLE_MODELS = {
 device = "cuda" if torch.cuda.is_available() else "cpu"
 text_model_cache = {}
 chat_memory = {}
+MAX_PROMPTS_PER_SESSION = 5
+THROTTLE_SECONDS = 30
+last_usage_time = {}

 # ---------- Load Image Generator ----------
 try:
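These three settings drive the new gating: at most MAX_PROMPTS_PER_SESSION prompts to the fine-tuned model per session, with a THROTTLE_SECONDS cooldown between accepted prompts. A minimal standalone sketch of that logic (check_rate_limit is a hypothetical helper name, not in the commit):

    import time

    MAX_PROMPTS_PER_SESSION = 5
    THROTTLE_SECONDS = 30
    last_usage_time = {}

    def check_rate_limit(session_id, prompts_used):
        # Refuse once the session's prompt budget is spent.
        if prompts_used >= MAX_PROMPTS_PER_SESSION:
            return "limit reached"
        # Refuse if the last accepted prompt was under THROTTLE_SECONDS ago.
        now = time.time()
        if now - last_usage_time.get(session_id, 0) < THROTTLE_SECONDS:
            return "throttled"
        last_usage_time[session_id] = now
        return "ok"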
@@ -53,8 +54,8 @@ except Exception as e:
     video_pipeline = None
     video_enabled = False

-# ---------- Streamed Response Generator ----------
-def codette_terminal(prompt, model_name, generate_image, generate_video, session_id, batch_size, video_steps, fps):
+# ---------- Rate-Limited Terminal ----------
+def codette_terminal_limited(prompt, model_name, generate_image, generate_video, session_id, batch_size, video_steps, fps):
     if session_id not in chat_memory:
         chat_memory[session_id] = []

@@ -63,28 +64,47 @@ def codette_terminal(prompt, model_name, generate_image, generate_video, session
         yield "🧠 Codette signing off... Session reset.", None, None
         return

-    # Load text model if not already loaded
-    if model_name not in text_model_cache:
+    if model_name == "Codette Fine-Tuned (v9)":
+        count = sum(1 for line in chat_memory[session_id] if line.startswith("🖋️ You >"))
+        if count >= MAX_PROMPTS_PER_SESSION:
+            yield "[🛑 Limit] Max 5 prompts per session.", None, None
+            return
+        now = time.time()
+        if now - last_usage_time.get(session_id, 0) < THROTTLE_SECONDS:
+            wait = int(THROTTLE_SECONDS - (now - last_usage_time[session_id]))
+            yield f"[⏳ Wait] Try again in {wait} sec.", None, None
+            return
+        last_usage_time[session_id] = now
+
+    if model_name == "Codette Fine-Tuned (v9)":
         try:
-            text_model_cache[model_name] = pipeline(
-                "text-generation",
+            response = openai.ChatCompletion.create(
                 model=AVAILABLE_MODELS[model_name],
-                device=0 if device == "cuda" else -1
+                messages=[{"role": "user", "content": prompt}],
+                temperature=0.7,
+                max_tokens=256
             )
+            output = response.choices[0].message.content.strip()
         except Exception as e:
-            yield f"[Text model error]: {e}", None, None
+            yield f"[OpenAI error]: {e}", None, None
+            return
+    else:
+        if model_name not in text_model_cache:
+            try:
+                text_model_cache[model_name] = pipeline(
+                    "text-generation",
+                    model=AVAILABLE_MODELS[model_name],
+                    device=0 if device == "cuda" else -1
+                )
+            except Exception as e:
+                yield f"[Text model error]: {e}", None, None
+                return
+        try:
+            output = text_model_cache[model_name](prompt, max_length=100, do_sample=True, num_return_sequences=1)[0]['generated_text'].strip()
+        except Exception as e:
+            yield f"[Generation error]: {e}", None, None
             return

-    generator = text_model_cache[model_name]
-
-    # Generate response
-    try:
-        output = generator(prompt, max_length=100, do_sample=True, num_return_sequences=1)[0]['generated_text'].strip()
-    except Exception as e:
-        yield f"[Text generation error]: {e}", None, None
-        return
-
-    # Stream the output
     response_so_far = ""
     for char in output:
         response_so_far += char
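Note that openai.ChatCompletion.create is the pre-1.0 interface of the openai Python SDK; it was removed in openai>=1.0, so the code above assumes the Space pins an older SDK (e.g. openai<1.0). A sketch of the equivalent call against the 1.x client, under that assumption:

    import os
    from openai import OpenAI

    client = OpenAI(api_key=os.getenv("OPENAI_KEY"))  # same Space secret as above
    response = client.chat.completions.create(
        model="ft:gpt-4.1-2025-04-14:raiffs-bits:codette-final:BO907H7Z",  # fine-tune ID from AVAILABLE_MODELS
        messages=[{"role": "user", "content": "Hello, Codette"}],
        temperature=0.7,
        max_tokens=256,
    )
    output = response.choices[0].message.content.strip()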
@@ -94,11 +114,10 @@ def codette_terminal(prompt, model_name, generate_image, generate_video, session
             yield "\n".join(temp_log[-10:]), None, None
             time.sleep(0.01)

-    # Finalize chat memory
     chat_memory[session_id].append(f"🖋️ You > {prompt}")
     chat_memory[session_id].append(f"🧠 Codette > {output}")

-    imgs = None
+    imgs, vid = None, None
     if generate_image and image_enabled:
         try:
             result = image_generator(prompt, num_images_per_prompt=batch_size)
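The character-by-character loop above works because Gradio treats a generator handler as a stream: each yield repaints the outputs. The same pattern in isolation (stream_chars is a hypothetical name, not in the commit):

    import time

    def stream_chars(text):
        # Yield a growing prefix; each yield is one UI update in Gradio.
        shown = ""
        for ch in text:
            shown += ch
            yield shown
            time.sleep(0.01)  # matches the app's 10 ms per character pacing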
@@ -106,7 +125,6 @@ def codette_terminal(prompt, model_name, generate_image, generate_video, session
         except Exception as e:
             response_so_far += f"\n[Image error]: {e}"

-    vid = None
     if generate_video and video_enabled:
         try:
             result = video_pipeline(prompt, num_inference_steps=video_steps)
@@ -121,8 +139,8 @@ def codette_terminal(prompt, model_name, generate_image, generate_video, session

 # ---------- Gradio UI ----------
 with gr.Blocks(title="🧬 Codette Terminal – Streamed AI Chat") as demo:
-    gr.Markdown("## 🧬 Codette Terminal (Chat + Image + Video + Batch + NSFW OK)")
-    gr.Markdown("Type a prompt, select your model, and configure generation options. Type `'exit'` to reset.")
+    gr.Markdown("## 🧬 Codette Terminal (Chat + Image + Video + Rate-Limited Access)")
+    gr.Markdown("Type a prompt, select a model, and generate responses, images, or video. Type `'exit'` to reset.")

     with gr.Row():
         session_id = gr.Textbox(value="session_default", visible=False)
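One caveat: the hidden session_id field is fixed at "session_default", so every visitor shares a single rate-limit bucket and chat memory. A hypothetical per-visitor variant (not in the commit) using Gradio's support for callable defaults, which are re-evaluated on each page load:

    import uuid
    import gradio as gr

    # Each page load gets its own ID, so quota and memory are per visitor.
    session_id = gr.Textbox(value=lambda: uuid.uuid4().hex, visible=False)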
@@ -143,7 +161,7 @@ with gr.Blocks(title="🧬 Codette Terminal – Streamed AI Chat") as demo:
         output_video = gr.Video(label="Generated Video")

     user_input.submit(
-        codette_terminal,
+        codette_terminal_limited,
         inputs=[
             user_input, model_dropdown, generate_image_toggle, generate_video_toggle,
             session_id, batch_size_slider, video_steps_slider, fps_slider
@@ -153,4 +171,4 @@ with gr.Blocks(title="🧬 Codette Terminal – Streamed AI Chat") as demo:

 # ---------- Launch ----------
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(mcp_server=True)  # ✅ tool-ready
 
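The mcp_server=True flag exposes the Space as an MCP tool server; it assumes a recent Gradio release with MCP support installed (the mcp extra, i.e. pip install "gradio[mcp]"). Older Gradio versions do not accept the keyword, so a defensive launch (a hypothetical sketch, not in the commit) could fall back:

    # Hypothetical fallback: older Gradio builds reject the mcp_server keyword.
    try:
        demo.launch(mcp_server=True)
    except TypeError:
        demo.launch()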