mindspark121 committed on
Commit
97b87f4
·
verified ·
1 Parent(s): ef0b933

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -21
app.py CHANGED
@@ -36,28 +36,7 @@ class SummaryRequest(BaseModel):
36
  chat_history: list # List of messages
37
 
38
 
39
- # Load Local LLM (Mistral or Llama)
40
- model_name = "mistralai/Mistral-7B-Instruct-v0.3"
41
- tokenizer = AutoTokenizer.from_pretrained(model_name)
42
- model = AutoModelForCausalLM.from_pretrained(model_name)
43
 
44
- def generate_local_emotional_response(user_input, questions):
45
- """Generate emotional responses locally using LLaMA/Mistral."""
46
- prompt = f"User: {user_input}\n\nBased on this, respond in an empathetic way before asking each question:\n1. {questions[0]}\n2. {questions[1]}\n3. {questions[2]}"
47
- inputs = tokenizer(prompt, return_tensors="pt")
48
- output = model.generate(**inputs, max_length=200)
49
- return tokenizer.decode(output[0], skip_special_tokens=True).split("\n")
50
-
51
- @app.post("/get_questions")
52
- def get_recommended_questions(request: ChatRequest):
53
- input_embedding = embedding_model.encode([request.message], convert_to_numpy=True)
54
- distances, indices = question_index.search(input_embedding, 3)
55
- retrieved_questions = [questions_df["Questions"].iloc[i] for i in indices[0]]
56
-
57
- # Generate dynamic emotional responses locally
58
- enhanced_responses = generate_local_emotional_response(request.message, retrieved_questions)
59
-
60
- return {"questions": enhanced_responses}
61
 
62
  @app.post("/summarize_chat")
63
  def summarize_chat(request: SummaryRequest):
36   chat_history: list # List of messages
37
38
39
40
41   @app.post("/summarize_chat")
42   def summarize_chat(request: SummaryRequest):