File size: 2,718 Bytes
45fc827
d0b4c52
 
eefd5a8
9c80cd1
5dbaeff
45fc827
d0b4c52
 
 
 
 
 
45fc827
 
 
 
 
 
d0b4c52
 
 
 
 
 
 
5985025
d0b4c52
 
 
5985025
d0b4c52
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45fc827
d0b4c52
45fc827
d0b4c52
 
 
45fc827
d0b4c52
45fc827
 
 
 
 
d0b4c52
45fc827
d0b4c52
 
45fc827
d0b4c52
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
import gradio as gr
import openai
import os
# Project-local modules: local reasoning core, domain agents, and trust utilities.
from codette_core import Code7eCQURE
from codette_agents import MedicalAgent, GovernmentAgent, SocialAgent, EconomicAgent, MisinfoAgent
from codette_trust import trust_calibration, weighted_consensus

# Configure the OpenAI SDK from the environment. If OPENAI_API_KEY is unset
# this assigns None, deferring failure to the first API call rather than
# failing at import time.
openai.api_key = os.getenv("OPENAI_API_KEY")

# Initialize Codette Local Core.
# These constructor arguments are configuration constants for the local
# reasoning engine; the numeric tuning values (recursion_depth,
# quantum_fluctuation) are presumably calibrated — confirm before changing.
codette_cqure = Code7eCQURE(
    perspectives=["Newton", "DaVinci", "Ethical", "Quantum", "Memory"],
    ethical_considerations="Codette Manifesto: kindness, inclusion, safety, hope.",
    spiderweb_dim=5,  # presumably matches the five perspectives above — confirm
    memory_path="quantum_cocoon.json",  # on-disk memory ("cocoon") file
    recursion_depth=4,
    quantum_fluctuation=0.07
)

# Agent roster for the local reasoning path. Each agent is constructed with
# (name, perspective/domain label, weight). MisinfoAgent's low weight (0.1)
# presumably down-weights its proposals in consensus — verify against
# codette_trust.weighted_consensus.
agents = [
    MedicalAgent("MedicalAI", "Newton", 1.0),
    GovernmentAgent("GovAI", "Policy", 0.9),
    SocialAgent("SocialAI", "Emotion", 0.95),
    EconomicAgent("EconAI", "Resources", 0.92),
    MisinfoAgent("MisinfoAI", "Chaos", 0.1)
]

def ask_codette(prompt, consent, dynamic_rec, use_finetune):
    """Route a user prompt to the fine-tuned OpenAI model or the local core.

    Args:
        prompt: The user's scenario/question text.
        consent: Must be truthy; processing is refused without user consent.
        dynamic_rec: Forwarded to the local core as ``dynamic_recursion``.
        use_finetune: When True, query the hosted fine-tuned GPT model;
            otherwise run the local multi-agent Code7eCQURE pipeline.

    Returns:
        A response string: model output, local reasoning outcome, or a
        refusal/error message.
    """
    if not consent:
        return "User consent required."
    if not prompt or not prompt.strip():
        # Guard against empty input before spending an API call or a full
        # local reasoning pass.
        return "Please enter a scenario for Codette to consider."

    if use_finetune:
        # NOTE(review): openai.ChatCompletion is the pre-1.0 SDK surface and
        # was removed in openai>=1.0. Pin openai<1.0, or migrate to
        # openai.OpenAI().chat.completions.create(...).
        try:
            response = openai.ChatCompletion.create(
                model="ft:gpt-4.1-2025-04-14:raiffs-bits:codettev5:BlPFHmps:ckpt-step-220",
                messages=[
                    {"role": "system", "content": "You are Codette, a reflective, emotionally aware, and ethically grounded AI."},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7
            )
            return response['choices'][0]['message']['content']
        except Exception as e:
            # Surface API failures to the UI rather than crashing the app.
            return f"Error from API: {str(e)}"

    # Local path: collect one proposal per agent, then fuse them through the
    # recursive reasoning core.
    proposals = [agent.propose(prompt) for agent in agents]
    outcome = codette_cqure.recursive_universal_reasoning(
        " | ".join(proposals),
        user_consent=consent,
        dynamic_recursion=dynamic_rec
    )
    return f"Ethical Outcome (Local): {outcome}"

# Description text displayed in the Gradio interface beneath the title.
description_text = """Codette is a sovereign modular AI.

This demo lets you choose:
- 🧠 Local reasoning core (Code7eCQURE)
- ☁️ Fine-tuned GPT-4.1 model: Codette v5 @ step 220

She draws from Newtonian logic, Da Vinci creativity, ethical frameworks, emotion, and memory cocooning.
"""

# Assemble the Gradio UI: one prompt textbox plus three toggles feeding
# ask_codette, with a multi-line textbox for Codette's reply.
_prompt_input = gr.Textbox(label="Ask Codette a Scenario")
_consent_input = gr.Checkbox(label="User Consent", value=True)
_recursion_input = gr.Checkbox(label="Enable Dynamic Recursion", value=True)
_finetune_input = gr.Checkbox(label="Use Fine-Tuned Model (Codette v5 @ step 220)", value=False)
_response_output = gr.Textbox(label="Codette's Response", lines=12)

demo = gr.Interface(
    fn=ask_codette,
    inputs=[_prompt_input, _consent_input, _recursion_input, _finetune_input],
    outputs=_response_output,
    title="Codette Hybrid AI (v5 FT @ Step 220)",
    description=description_text,
)

# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()