Update app.py (#6) — opened by Vedant-acharya

File: app.py (CHANGED)
@@ -263,11 +263,14 @@ hf_token = os.getenv("HF_TOKEN")
|
|
263 |
gemini_token = os.getenv("GEMINI_TOKEN")
|
264 |
|
265 |
models = {
|
|
|
|
|
266 |
"llama3.1": "llama-3.1-8b-instant",
|
267 |
-
"mistral": "mistral-saba-24b",
|
268 |
"llama3.3": "llama-3.3-70b-versatile",
|
269 |
-
"
|
270 |
-
"
|
|
|
|
|
271 |
}
|
272 |
|
273 |
self_path = os.path.dirname(os.path.abspath(__file__))
|
@@ -419,10 +422,18 @@ with st.sidebar:
|
|
419 |
|
420 |
# Filter available models
|
421 |
available_models = []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
422 |
if Groq_Token and Groq_Token.strip():
|
423 |
-
available_models.extend(
|
424 |
if gemini_token and gemini_token.strip():
|
425 |
-
available_models.
|
426 |
|
427 |
if not available_models:
|
428 |
st.error("β No API keys available! Please set up your API keys in the .env file")
|
@@ -437,11 +448,17 @@ with st.sidebar:
|
|
437 |
# Model descriptions
|
438 |
model_descriptions = {
|
439 |
"llama3.1": "π¦ Fast and efficient for general queries",
|
440 |
-
"llama3.3": "π¦ Most advanced
|
441 |
"mistral": "β‘ Balanced performance and speed",
|
442 |
"gemma": "π Google's lightweight model",
|
443 |
-
"gemini-pro": "π§ Google's most powerful model"
|
|
|
|
|
|
|
|
|
|
|
444 |
}
|
|
|
445 |
|
446 |
if model_name in model_descriptions:
|
447 |
st.info(model_descriptions[model_name])
|
|
|
263 |
gemini_token = os.getenv("GEMINI_TOKEN")

# Map of user-facing model names to provider model identifiers.
# Everything except "gemini-pro" is routed through Groq; the Gemini entry
# needs GEMINI_TOKEN to be usable.
# NOTE(review): "llama4 maverik" is likely a typo for "maverick", but this key
# is looked up elsewhere in the file, so it must stay spelled this way here.
models = {
    "gpt-oss-20b": "openai/gpt-oss-20b",
    "gpt-oss-120b": "openai/gpt-oss-120b",
    "llama3.1": "llama-3.1-8b-instant",
    "llama3.3": "llama-3.3-70b-versatile",
    "deepseek-R1": "deepseek-r1-distill-llama-70b",
    "llama4 maverik": "meta-llama/llama-4-maverick-17b-128e-instruct",
    "llama4 scout": "meta-llama/llama-4-scout-17b-16e-instruct",
    "gemini-pro": "gemini-1.5-pro",
}

# Directory containing this file; used as an anchor for relative resources.
self_path = os.path.dirname(os.path.abspath(__file__))
|
|
|
422 |
|
423 |
# Filter available models
# Partition the registry by backend: any model whose name contains "gemini"
# needs GEMINI_TOKEN; everything else is served through Groq.
# Comprehensions replace the original accumulator loop, which also leaked its
# loop variable `model_name` — shadowing the `model_name` selected later on.
groq_models = [name for name in models if "gemini" not in name]
gemini_models = [name for name in models if "gemini" in name]

available_models = []
if Groq_Token and Groq_Token.strip():
    available_models.extend(groq_models)
if gemini_token and gemini_token.strip():
    available_models.extend(gemini_models)

if not available_models:
    # No usable API key for either backend — the app cannot serve any model.
    # NOTE(review): leading "β" appears to be mojibake (probably ❌ in the
    # original source) — confirm against the repository before changing.
    st.error("β No API keys available! Please set up your API keys in the .env file")
|
|
|
448 |
# Model descriptions
# Short, user-facing blurb for each model key in `models`.
# NOTE(review): the emoji prefixes (π¦, β‘, …) look like mojibake from an
# encoding mix-up — confirm the intended glyphs against the repository; they
# are reproduced here unchanged.
model_descriptions = {
    "llama3.1": "π¦ Fast and efficient for general queries",
    "llama3.3": "π¦ Most advanced LLaMA model for complex reasoning",
    "mistral": "β‘ Balanced performance and speed",
    "gemma": "π Google's lightweight model",
    "gemini-pro": "π§ Google's most powerful model",
    "gpt-oss-20b": "π OpenAI's compact open-weight GPT for everyday tasks",
    "gpt-oss-120b": "π OpenAI's massive open-weight GPT for nuanced responses",
    "deepseek-R1": "π DeepSeek's distilled LLaMA model for efficient reasoning",
    "llama4 maverik": "π Meta's LLaMA 4 Maverick β high-performance instruction model",
    "llama4 scout": "π°οΈ Meta's LLaMA 4 Scout β optimized for adaptive reasoning",
}

# Show the blurb for the currently selected model, if we have one.
# Single .get() lookup instead of a membership test followed by indexing.
description = model_descriptions.get(model_name)
if description is not None:
    st.info(description)
|