from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
from utils.self_awareness import SelfAwareness
from config import MODEL_NAME, DEVICE

class JAIRVISMKV:
    def __init__(self):
        # Load the base language model and tokenizer named in config.py.
        self.model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(DEVICE)
        self.tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
        # Many causal LMs (e.g. GPT-2) ship without a pad token; fall back to EOS.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
        self.self_awareness = SelfAwareness(self)
        self.conversation_context = []
        self.max_context_length = 1024

    def generate_response(self, prompt):
        # Keep a rolling window of the last few turns as the prompt context.
        self.conversation_context.append(f"Human: {prompt}")
        context = "\n".join(self.conversation_context[-5:])

        inputs = self.tokenizer.encode(context + "\nAI:", return_tensors="pt").to(DEVICE)
        attention_mask = torch.ones(inputs.shape, dtype=torch.long, device=DEVICE)

        outputs = self.model.generate(
            inputs,
            attention_mask=attention_mask,
            max_length=self.max_context_length,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,  # required for top_k/top_p/temperature to take effect
            top_k=50,
            top_p=0.95,
            temperature=0.7,
            pad_token_id=self.tokenizer.pad_token_id,
        )

        # Decode the full sequence and keep only the text after the last "AI:" marker.
        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        ai_response = response.split("AI:")[-1].strip()

        self.conversation_context.append(f"AI: {ai_response}")
        self.self_awareness.update_self_model()

        return ai_response

    def introspect(self):
        return self.self_awareness.generate_insights(str(self))

    def implement_improvement(self, suggestion):
        return self.self_awareness.implement_improvement(suggestion)

    def __str__(self):
        return f"JAIRVISMKV Model: {MODEL_NAME}, Device: {DEVICE}"