# NOTE(review): removed pasted page artifacts ("Spaces:", "Runtime error" x2)
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

from utils.self_awareness import SelfAwareness
from config import MODEL_NAME, DEVICE
class JAIRVISMKV:
    """Conversational wrapper around a causal LM plus a self-awareness module.

    Keeps a rolling "Human:/AI:" transcript and feeds the last few turns back
    into the model as context for each new reply.
    """

    def __init__(self):
        self.model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(DEVICE)
        self.tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
        self.self_awareness = SelfAwareness(self)
        # Alternating "Human: ..." / "AI: ..." turns, appended as they occur.
        self.conversation_context = []
        # Hard cap on total generated length (prompt + reply), in tokens.
        self.max_context_length = 1024

    def generate_response(self, prompt):
        """Generate a reply to *prompt*, conditioning on the last 5 turns.

        Appends both the human turn and the model's reply to the rolling
        transcript, pings the self-awareness module, and returns the reply text.
        """
        self.conversation_context.append(f"Human: {prompt}")
        context = "\n".join(self.conversation_context[-5:])
        inputs = self.tokenizer.encode(context + "\nAI:", return_tensors="pt").to(DEVICE)
        attention_mask = torch.ones(inputs.shape, dtype=torch.long, device=DEVICE)
        # Inference only — no_grad avoids building the autograd graph.
        with torch.no_grad():
            outputs = self.model.generate(
                inputs,
                attention_mask=attention_mask,
                max_length=self.max_context_length,
                num_return_sequences=1,
                no_repeat_ngram_size=2,
                # do_sample=True is required for top_k/top_p/temperature to take
                # effect; without it generate() decodes greedily and silently
                # ignores those arguments.
                do_sample=True,
                top_k=50,
                top_p=0.95,
                temperature=0.7,
                # GPT-style checkpoints typically define no pad token; reusing
                # EOS silences the transformers warning and pads correctly.
                pad_token_id=self.tokenizer.eos_token_id,
            )
        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        # The decoded text echoes the whole prompt; keep only the final AI turn.
        ai_response = response.split("AI:")[-1].strip()
        self.conversation_context.append(f"AI: {ai_response}")
        self.self_awareness.update_self_model()
        return ai_response

    def introspect(self):
        """Return the self-awareness module's insights about this instance."""
        return self.self_awareness.generate_insights(str(self))

    def implement_improvement(self, suggestion):
        """Delegate an improvement *suggestion* to the self-awareness module."""
        return self.self_awareness.implement_improvement(suggestion)

    def __str__(self):
        return f"JAIRVISMKV Model: {MODEL_NAME}, Device: {DEVICE}"