Raiff1982 committed
Commit
ff58d3c
·
verified ·
1 Parent(s): e3dc1e5

Create codette_terminal_limited/py

Files changed (1)
  1. codette_terminal_limited/py +113 -0
codette_terminal_limited/py ADDED
@@ -0,0 +1,113 @@
+import os
+import time
+import tempfile
+
+import imageio
+import openai
+import torch
+from transformers import pipeline
+
+# Set API key (read from the environment; never hardcode secrets in source)
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
+MAX_PROMPTS_PER_SESSION = 5
+THROTTLE_SECONDS = 30
+last_usage_time = {}
+
+# Shared state used below (AVAILABLE_MODELS, image_generator, image_enabled,
+# video_pipeline, and video_enabled are expected to be defined by the hosting app).
+chat_memory = {}        # session_id -> list of chat-log lines
+text_model_cache = {}   # model display name -> cached transformers pipeline
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+def codette_terminal(prompt, model_name, generate_image, generate_video, session_id, batch_size, video_steps, fps):
+    if session_id not in chat_memory:
+        chat_memory[session_id] = []
+
+    if prompt.lower() in ["exit", "quit"]:
+        chat_memory[session_id] = []
+        yield "🧠 Codette signing off... Session reset.", None, None
+        return
+
+    # --- Usage limits for fine-tuned model only ---
+    if model_name == "Codette Fine-Tuned (v9)":
+        # Cap prompts per session by counting the user's lines in the chat log.
+        count = sum(1 for line in chat_memory[session_id] if line.startswith("🖋️ You >"))
+        if count >= MAX_PROMPTS_PER_SESSION:
+            yield "[🛑 Usage Limit] You've reached the max prompt limit (5) for this session.", None, None
+            return
+
+        # Throttle: enforce a minimum gap between prompts in the same session.
+        now = time.time()
+        if now - last_usage_time.get(session_id, 0) < THROTTLE_SECONDS:
+            wait = int(THROTTLE_SECONDS - (now - last_usage_time[session_id]))
+            yield f"[⏳ Throttle] Wait {wait}s before trying again.", None, None
+            return
+        last_usage_time[session_id] = now
+
+    response_so_far = ""
+
+    if model_name == "Codette Fine-Tuned (v9)":
+        try:
+            response = openai.ChatCompletion.create(
+                model="ft:gpt-4.1-2025-04-14:raiffs-bits:codette-final:BO907H7Z",
+                messages=[{"role": "user", "content": prompt}],
+                temperature=0.7,
+                max_tokens=256
+            )
+            output = response.choices[0].message.content.strip()
+        except Exception as e:
+            yield f"[OpenAI fine-tuned model error]: {e}", None, None
+            return
+    else:
+        # Lazily create and cache a local text-generation pipeline.
+        if model_name not in text_model_cache:
+            try:
+                text_model_cache[model_name] = pipeline(
+                    "text-generation",
+                    model=AVAILABLE_MODELS[model_name],
+                    device=0 if device == "cuda" else -1
+                )
+            except Exception as e:
+                yield f"[Text model error]: {e}", None, None
+                return
+
+        generator = text_model_cache[model_name]
+        try:
+            output = generator(prompt, max_length=100, do_sample=True, num_return_sequences=1)[0]['generated_text'].strip()
+        except Exception as e:
+            yield f"[Text generation error]: {e}", None, None
+            return
+
+    # Stream the reply character by character so the UI can update live.
+    for char in output:
+        response_so_far += char
+        temp_log = chat_memory[session_id][:]
+        temp_log.append(f"🖋️ You > {prompt}")
+        temp_log.append(f"🧠 Codette > {response_so_far}")
+        yield "\n".join(temp_log[-10:]), None, None
+        time.sleep(0.01)
+
+    chat_memory[session_id].append(f"🖋️ You > {prompt}")
+    chat_memory[session_id].append(f"🧠 Codette > {output}")
+
+    imgs = None
+    if generate_image and image_enabled:
+        try:
+            result = image_generator(prompt, num_images_per_prompt=batch_size)
+            imgs = result.images
+        except Exception as e:
+            response_so_far += f"\n[Image error]: {e}"
+
+    vid = None
+    if generate_video and video_enabled:
+        try:
+            result = video_pipeline(prompt, num_inference_steps=video_steps)
+            frames = result.frames
+            temp_video_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
+            imageio.mimsave(temp_video_path, frames, fps=fps)
+            vid = temp_video_path
+        except Exception as e:
+            response_so_far += f"\n[Video error]: {e}"
+
+    yield "\n".join(chat_memory[session_id][-10:]), imgs, vid
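Since codette_terminal is a generator yielding (chat_log, images, video) tuples, it drops naturally into a streaming Gradio app, the usual host for this kind of Space. A minimal wiring sketch follows; it assumes gradio is installed and that AVAILABLE_MODELS and the image/video globals referenced above exist. The component names and slider ranges are illustrative assumptions, not part of this commit.

import uuid
import gradio as gr

with gr.Blocks() as demo:
    # Per-session id so the prompt cap and throttle track each visitor separately.
    session_id = gr.State("")
    model = gr.Dropdown(
        list(AVAILABLE_MODELS) + ["Codette Fine-Tuned (v9)"],  # hypothetical choices
        label="Model", value="Codette Fine-Tuned (v9)",
    )
    prompt = gr.Textbox(label="Prompt")
    gen_img = gr.Checkbox(label="Generate image")
    gen_vid = gr.Checkbox(label="Generate video")
    batch = gr.Slider(1, 4, value=1, step=1, label="Image batch size")
    steps = gr.Slider(1, 50, value=25, step=1, label="Video steps")
    fps = gr.Slider(1, 30, value=8, step=1, label="Video FPS")
    log = gr.Textbox(label="Chat log", lines=12)
    gallery = gr.Gallery(label="Images")
    video = gr.Video(label="Video")

    # Assign a fresh session id on every page load.
    demo.load(lambda: str(uuid.uuid4()), outputs=session_id)

    # Gradio streams each yield from a generator callback, so the
    # character-by-character loop shows up as a live typing effect.
    prompt.submit(
        codette_terminal,
        inputs=[prompt, model, gen_img, gen_vid, session_id, batch, steps, fps],
        outputs=[log, gallery, video],
    )

demo.launch()

Note that intermediate yields pass None for the image and video outputs, so those components only update once on the final yield; only the chat log streams.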