# llm.py — Groq-backed LLM helper (from Hugging Face upload, commit 58cedb3, 957 bytes)
from dotenv import load_dotenv
import os
from groq import Groq
# Load variables from a local .env file (if present) before reading the key.
load_dotenv()

# Fail fast at import time: everything in this module needs a Groq client,
# so a missing key should surface immediately rather than on the first request.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
if not GROQ_API_KEY:
    raise RuntimeError(
        "GROQ_API_KEY environment variable not set. "
        "Please set it to your Groq API key."
    )

# Single module-level client, shared by all callers of get_llm_response().
groq_client = Groq(api_key=GROQ_API_KEY)
def get_llm_response(
    user_text,
    context,
    emotion,
    tone_instruction,
    model="meta-llama/llama-4-scout-17b-16e-instruct",
):
    """Generate an emotion-aware answer to *user_text* via the Groq chat API.

    Args:
        user_text: The user's question.
        context: Retrieved context text the answer should be grounded in.
        emotion: The user's detected emotional state (interpolated into the prompt).
        tone_instruction: Extra instruction steering the response tone.
        model: Groq model identifier; defaults to the previously hard-coded
            Llama 4 Scout model, so existing callers are unaffected.

    Returns:
        The assistant's reply text (str) from the first completion choice.

    Raises:
        Exceptions from the Groq client (e.g. groq.APIError) propagate to the caller.
    """
    # Triple-quoted f-string: the prompt content is kept flush-left so the
    # exact text sent to the model is unchanged.
    prompt = f"""
You are a helpful and emotionally aware assistant.
The user's emotional state is: {emotion}.
{tone_instruction}
Using the following context, answer the user's question:
---
{context}
---
Question: {user_text}
"""
    completion = groq_client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        temperature=1,
        max_completion_tokens=1024,
        top_p=1,
        stream=False,  # non-streaming: we return the full message in one piece
        stop=None,
    )
    return completion.choices[0].message.content